xref: /linux/drivers/mmc/core/queue.c (revision c79c3c34f75d72a066e292b10aa50fc758c97c89)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"

#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}
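
/*
 * Note: MMC_CQE_DCMD_BUSY is set in mmc_mq_queue_rq() when a flush arrives
 * while another DCMD is still in flight; mmc_cqe_check_busy() clears it
 * again once that DCMD has completed, so the blocked request can be
 * redispatched.
 */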

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}
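
/*
 * Illustrative sketch (not a quote from block.c): the issuing side
 * dispatches on the value returned above roughly as
 *
 *	switch (mmc_issue_type(mq, req)) {
 *	case MMC_ISSUE_SYNC:	issue the request and wait for completion
 *	case MMC_ISSUE_DCMD:	queue it as a CQE direct command
 *	case MMC_ISSUE_ASYNC:	start it and return without waiting
 *	}
 */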

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
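
/*
 * In other words, the block-layer timer is simply re-armed whenever recovery
 * is already pending, CQE is not in use, or the host uses the software
 * queue; only a genuine CQE request is allowed to time out via
 * mmc_cqe_timed_out() above.
 */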

static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}
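
/*
 * The scatterlist allocated above is sized for the worst case
 * (mmc_get_max_segments()), lives for the lifetime of the request, and is
 * freed again in mmc_exit_request().
 */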

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = SECTOR_SIZE;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}
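
/*
 * Worked example with illustrative numbers: pref_erase is in 512-byte
 * sectors, so pref_erase = 8192 gives discard_granularity = 8192 << 9 =
 * 4 MiB. If max_discard came back smaller than 8192 sectors, the
 * granularity would fall back to SECTOR_SIZE instead.
 */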

static unsigned int mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}
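
/*
 * With host->can_dma_map_merge set, an IOMMU can merge scatterlist entries
 * when the request is DMA-mapped, so the queue may carry up to
 * MMC_DMA_MAP_MERGE_SEGMENTS segments even where the controller's own
 * host->max_segs is smaller.
 */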

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the mmc queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	int ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		/*
		 * For the MMC host software queue, cap the number of requests
		 * in flight to avoid long latency: a new request is only
		 * admitted while at most two are already outstanding.
		 */
		if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, once a timeout fires, blk_mq_complete_request() no
		 * longer completes the request (blk-mq holds it to stop it
		 * disappearing under the timeout handler). Set a large timeout
		 * to avoid ever racing with that.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};
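
/*
 * Note that the tag set is created with BLK_MQ_F_BLOCKING (see
 * mmc_init_queue() below), so .queue_rq above is allowed to sleep, e.g.
 * inside mmc_get_card().
 */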

static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
		block_size = card->ext_csd.data_sector_size;
		WARN_ON(block_size != 512 && block_size != 4096);
	}

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * If blk_queue_can_use_dma_map_merging() succeeded above, it has
	 * already set the boundary via blk_queue_virt_boundary(), in which
	 * case MMC must not also call blk_queue_max_segment_size().
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);

	mmc_crypto_setup_queue(mq->queue, host);
}
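
/*
 * Worked example for the max_hw_sectors limit above (illustrative numbers):
 * a host with max_blk_count = 65535 and max_req_size = 524288 bytes gets
 * min(65535, 524288 / 512) = 1024 sectors, i.e. at most 512 KiB per request.
 */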

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops, which
	 * sizes the per-request scatterlist via mmc_get_max_segments(), so
	 * host->can_dma_map_merge must be decided before that call.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}
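
/*
 * Illustrative pairing (a sketch; the real caller is the mmc block driver):
 *
 *	ret = mmc_init_queue(mq, card);
 *	if (ret)
 *		return ret;
 *	...
 *	mmc_queue_suspend(mq) / mmc_queue_resume(mq) around host suspend
 *	mmc_cleanup_queue(mq) when the card goes away
 */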

void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
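
/*
 * Illustrative use (a sketch, assuming the usual struct mmc_data fields):
 * a caller typically records the result as the scatterlist length handed
 * to the host:
 *
 *	data->sg = mqrq->sg;
 *	data->sg_len = mmc_queue_map_sg(mq, mqrq);
 */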