/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
	unsigned completed;
};

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}
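
/*
 * Account the completed I/O in dm-stats, if statistics collection is
 * enabled for this mapped device.
 */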
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}
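
/*
 * Release any clone resources and hand the original request back to the
 * block layer to be requeued, optionally after a short delay.
 */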
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
			 !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}
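
/*
 * Hand the prepared clone off to the underlying device's request queue.
 * On a hard failure (anything other than a resource shortage), complete
 * the original request with the returned status.
 */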
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
	return r;
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}
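
/*
 * blk-mq .queue_rq handler: map the incoming request through the (usually
 * immutable) target and dispatch the resulting clone. Returning
 * BLK_STS_RESOURCE makes blk-mq retry the request later.
 */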
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
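
/*
 * Allocate and register the blk-mq tag set for this mapped device and
 * initialize its already-allocated request queue for request-based DM.
 */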
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");