/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

/*
 * The queue thread: fetches requests from the dispatch queue and hands
 * them to mmc_blk_issue_rq(). Two mmc_queue_req slots (mqrq_cur and
 * mqrq_prev) are swapped after each issue so that the next request can
 * be prepared while the previous one is still in flight; when the queue
 * is empty the thread sleeps until mmc_request_fn() wakes it.
 */
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
			if (mq->new_request) {
				mq->new_request = false;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}

	return true;

out_err:
	while (--i >= 0) {
		kfree(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	pr_warn("%s: unable to allocate bounce buffers\n",
		mmc_card_name(mq->card));
	return false;
}

static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
				      unsigned int bouncesz)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
		if (ret)
			return ret;

		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++)
		mmc_queue_req_free_bufs(&mq->mqrq[i]);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

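/*
 * Illustrative sketch (not part of this driver): the expected pairing of
 * mmc_init_queue() and mmc_cleanup_queue() as seen from a caller. The
 * my_blk_data structure and my_blk_* names below are hypothetical; in
 * this tree the MMC block driver (block.c) is the real user.
 *
 *	static int my_blk_setup_queue(struct my_blk_data *md,
 *				      struct mmc_card *card)
 *	{
 *		int ret;
 *
 *		spin_lock_init(&md->lock);
 *		ret = mmc_init_queue(&md->queue, card, &md->lock, NULL);
 *		if (ret)
 *			return ret;	(queue and mmcqd were not created)
 *		return 0;
 *	}
 *
 *	static void my_blk_remove_queue(struct my_blk_data *md)
 *	{
 *		mmc_cleanup_queue(&md->queue);	(resumes if suspended,
 *						 stops mmcqd, frees buffers)
 *	}
 */
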
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended |= true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			  mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			    mqrq->bounce_buf, mqrq->sg[0].length);
}
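
/*
 * Illustrative sketch (not part of this driver): mmc_queue_suspend() and
 * mmc_queue_resume() are meant to bracket periods when the host cannot
 * service requests, e.g. around system suspend. The my_blk_* names below
 * are hypothetical; in this tree the MMC block driver makes the
 * equivalent calls from its suspend/resume paths.
 *
 *	static int my_blk_suspend(struct my_blk_data *md)
 *	{
 *		mmc_queue_suspend(&md->queue);	(stops the dispatch queue
 *						 and takes thread_sem, so
 *						 mmcqd issues no new work)
 *		return 0;
 *	}
 *
 *	static void my_blk_resume(struct my_blk_data *md)
 *	{
 *		mmc_queue_resume(&md->queue);	(releases thread_sem and
 *						 restarts the queue)
 *	}
 */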