// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"

#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (host->cqe_enabled && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (host->cqe_enabled && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct mmc_card *card,
				    struct queue_limits *lim)
{
	unsigned int max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	lim->max_hw_discard_sectors = max_discard;
	if (mmc_can_secure_erase_trim(card))
		lim->max_secure_erase_sectors = max_discard;
	if (mmc_can_trim(card) && card->erased_byte == 0)
		lim->max_write_zeroes_sectors = max_discard;

	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		lim->discard_granularity = SECTOR_SIZE;
	else
		lim->discard_granularity = card->pref_erase << 9;
}

static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ?
		MMC_DMA_MAP_MERGE_SEGMENTS : host->max_segs;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = set->driver_data;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		if (host->hsq_enabled && mq->in_flight[issue_type] > host->hsq_depth) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (host->cqe_enabled) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
				      struct mmc_card *card,
				      unsigned int features)
{
	struct mmc_host *host = card->host;
	struct queue_limits lim = {
		.features	= features,
	};
	struct gendisk *disk;

	if (mmc_can_erase(card))
		mmc_queue_setup_discard(card, &lim);

	lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
		lim.logical_block_size = card->ext_csd.data_sector_size;
	else
		lim.logical_block_size = 512;

	WARN_ON_ONCE(lim.logical_block_size != 512 &&
		     lim.logical_block_size != 4096);

	/*
	 * Setting a virt_boundary implicitly sets a max_segment_size, so try
	 * to set the hardware one here.
	 */
	if (host->can_dma_map_merge) {
		lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host));
		lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS;
	} else {
		lim.max_segment_size =
			round_down(host->max_seg_size, lim.logical_block_size);
		lim.max_segments = host->max_segs;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		lim.features |= BLK_FEAT_STABLE_WRITES;

	disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
	if (IS_ERR(disk))
		return disk;
	mq->queue = disk->queue;

	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);

	mmc_crypto_setup_queue(mq->queue, host);
	return disk;
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @features: block layer features (BLK_FEAT_*)
 *
 * Initialise an MMC card request queue.
 */
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
			       unsigned int features)
{
	struct mmc_host *host = card->host;
	struct gendisk *disk;
	int ret;

	mq->card = card;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (host->cqe_enabled && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
	 * host->can_dma_map_merge must be set beforehand so that
	 * mmc_get_max_segments() returns the correct number of segments.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ERR_PTR(ret);

	disk = mmc_alloc_disk(mq, card, features);
	if (IS_ERR(disk))
		blk_mq_free_tag_set(&mq->tag_set);
	return disk;
}

void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	/*
	 * If the recovery completes the last (and only remaining) request in
	 * the queue, and the card has been removed, we could end up here with
	 * the recovery not quite finished yet, so cancel it.
	 */
	cancel_work_sync(&mq->recovery_work);

	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}