/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
					 struct request *req)
{
	struct mmc_queue_req *mqrq;
	int i = ffz(mq->qslots);

	if (i >= mq->qdepth)
		return NULL;

	mqrq = &mq->mqrq[i];
	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
		test_bit(mqrq->task_id, &mq->qslots));
	mqrq->req = req;
	mq->qcnt += 1;
	__set_bit(mqrq->task_id, &mq->qslots);

	return mqrq;
}

void mmc_queue_req_free(struct mmc_queue *mq,
			struct mmc_queue_req *mqrq)
{
	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
		!test_bit(mqrq->task_id, &mq->qslots));
	mqrq->req = NULL;
	mq->qcnt -= 1;
	__clear_bit(mqrq->task_id, &mq->qslots);
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->qcnt)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || mq->qcnt) {
			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
{
	int i;

	for (i = 0; i < qdepth; i++)
		mmc_queue_req_free_bufs(&mqrq[i]);
}

static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
{
	mmc_queue_reqs_free_bufs(mqrq, qdepth);
	kfree(mqrq);
}

static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
{
	struct mmc_queue_req *mqrq;
	int i;

	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
	if (mqrq) {
		for (i = 0; i < qdepth; i++)
			mqrq[i].task_id = i;
	}

	return mqrq;
}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
				       unsigned int bouncesz)
{
	int i;

	for (i = 0; i < qdepth; i++) {
		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mqrq[i].bounce_buf)
			return -ENOMEM;

		mqrq[i].sg = mmc_alloc_sg(1);
		if (!mqrq[i].sg)
			return -ENOMEM;

		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
		if (!mqrq[i].bounce_sg)
			return -ENOMEM;
	}

	return 0;
}

static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
				   unsigned int bouncesz)
{
	int ret;

	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
	if (ret)
		mmc_queue_reqs_free_bufs(mqrq, qdepth);

	return !ret;
}

static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	if (host->max_segs != 1)
		return 0;

	if (bouncesz > host->max_req_size)
		bouncesz = host->max_req_size;
	if (bouncesz > host->max_seg_size)
		bouncesz = host->max_seg_size;
	if (bouncesz > host->max_blk_count * 512)
		bouncesz = host->max_blk_count * 512;

	if (bouncesz <= 512)
		return 0;

	return bouncesz;
}
#else
static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
					  int qdepth, unsigned int bouncesz)
{
	return false;
}

static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
	return 0;
}
#endif

static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
			       int max_segs)
{
	int i;

	for (i = 0; i < qdepth; i++) {
		mqrq[i].sg = mmc_alloc_sg(max_segs);
		if (!mqrq[i].sg)
			return -ENOMEM;
	}

	return 0;
}

void mmc_queue_free_shared_queue(struct mmc_card *card)
{
	if (card->mqrq) {
		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
		card->mqrq = NULL;
	}
}

static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
{
	struct mmc_host *host = card->host;
	struct mmc_queue_req *mqrq;
	unsigned int bouncesz;
	int ret = 0;

	if (card->mqrq)
		return -EINVAL;

	mqrq = mmc_queue_alloc_mqrqs(qdepth);
	if (!mqrq)
		return -ENOMEM;

	card->mqrq = mqrq;
	card->qdepth = qdepth;

	bouncesz = mmc_queue_calc_bouncesz(host);

	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
		bouncesz = 0;
		pr_warn("%s: unable to allocate bounce buffers\n",
			mmc_card_name(card));
	}

	card->bouncesz = bouncesz;

	if (!bouncesz) {
		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
		if (ret)
			goto out_err;
	}

	return ret;

out_err:
	mmc_queue_free_shared_queue(card);
	return ret;
}

int mmc_queue_alloc_shared_queue(struct mmc_card *card)
{
	return __mmc_queue_alloc_shared_queue(card, 2);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq = card->mqrq;
	mq->qdepth = card->qdepth;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (card->bouncesz) {
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
		blk_queue_max_segment_size(mq->queue, card->bouncesz);
	} else {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");
subname : ""); 407 408 if (IS_ERR(mq->thread)) { 409 ret = PTR_ERR(mq->thread); 410 goto cleanup_queue; 411 } 412 413 return 0; 414 415 cleanup_queue: 416 mq->mqrq = NULL; 417 blk_cleanup_queue(mq->queue); 418 return ret; 419 } 420 421 void mmc_cleanup_queue(struct mmc_queue *mq) 422 { 423 struct request_queue *q = mq->queue; 424 unsigned long flags; 425 426 /* Make sure the queue isn't suspended, as that will deadlock */ 427 mmc_queue_resume(mq); 428 429 /* Then terminate our worker thread */ 430 kthread_stop(mq->thread); 431 432 /* Empty the queue */ 433 spin_lock_irqsave(q->queue_lock, flags); 434 q->queuedata = NULL; 435 blk_start_queue(q); 436 spin_unlock_irqrestore(q->queue_lock, flags); 437 438 mq->mqrq = NULL; 439 mq->card = NULL; 440 } 441 EXPORT_SYMBOL(mmc_cleanup_queue); 442 443 /** 444 * mmc_queue_suspend - suspend a MMC request queue 445 * @mq: MMC queue to suspend 446 * 447 * Stop the block request queue, and wait for our thread to 448 * complete any outstanding requests. This ensures that we 449 * won't suspend while a request is being processed. 450 */ 451 void mmc_queue_suspend(struct mmc_queue *mq) 452 { 453 struct request_queue *q = mq->queue; 454 unsigned long flags; 455 456 if (!mq->suspended) { 457 mq->suspended |= true; 458 459 spin_lock_irqsave(q->queue_lock, flags); 460 blk_stop_queue(q); 461 spin_unlock_irqrestore(q->queue_lock, flags); 462 463 down(&mq->thread_sem); 464 } 465 } 466 467 /** 468 * mmc_queue_resume - resume a previously suspended MMC request queue 469 * @mq: MMC queue to resume 470 */ 471 void mmc_queue_resume(struct mmc_queue *mq) 472 { 473 struct request_queue *q = mq->queue; 474 unsigned long flags; 475 476 if (mq->suspended) { 477 mq->suspended = false; 478 479 up(&mq->thread_sem); 480 481 spin_lock_irqsave(q->queue_lock, flags); 482 blk_start_queue(q); 483 spin_unlock_irqrestore(q->queue_lock, flags); 484 } 485 } 486 487 /* 488 * Prepare the sg list(s) to be handed of to the host driver 489 */ 490 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) 491 { 492 unsigned int sg_len; 493 size_t buflen; 494 struct scatterlist *sg; 495 int i; 496 497 if (!mqrq->bounce_buf) 498 return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); 499 500 sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); 501 502 mqrq->bounce_sg_len = sg_len; 503 504 buflen = 0; 505 for_each_sg(mqrq->bounce_sg, sg, sg_len, i) 506 buflen += sg->length; 507 508 sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen); 509 510 return 1; 511 } 512 513 /* 514 * If writing, bounce the data to the buffer before the request 515 * is sent to the host driver 516 */ 517 void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq) 518 { 519 if (!mqrq->bounce_buf) 520 return; 521 522 if (rq_data_dir(mqrq->req) != WRITE) 523 return; 524 525 sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, 526 mqrq->bounce_buf, mqrq->sg[0].length); 527 } 528 529 /* 530 * If reading, bounce the data from the buffer after the request 531 * has been handled by the host driver 532 */ 533 void mmc_queue_bounce_post(struct mmc_queue_req *mqrq) 534 { 535 if (!mqrq->bounce_buf) 536 return; 537 538 if (rq_data_dir(mqrq->req) != READ) 539 return; 540 541 sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, 542 mqrq->bounce_buf, mqrq->sg[0].length); 543 } 544