// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	if (!q->poll_stat)
		return 0;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}
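/*
 * blk_flags_show() decodes a flag word bit by bit: bits with an entry in
 * flag_name[] are printed by name, and any bit without a name falls back
 * to its raw bit number.  For example (illustrative only, assuming bit 1
 * is named "DYING" in the table passed in), a word with bits 1 and 28 set
 * would print:
 *
 *	DYING|28
 */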
static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed.  Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
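/*
 * queue_state_write() is easy to exercise from userspace (assuming debugfs
 * is mounted at /sys/kernel/debug and the disk is named sda; both are
 * examples):
 *
 *	# echo run > /sys/kernel/debug/block/sda/state
 *
 * "run" runs the hardware queues, "start" restarts stopped queues and
 * "kick" kicks the requeue list.
 */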
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}
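/*
 * The XOR with BLK_ALLOC_POLICY_TO_MQ_FLAG() above strips the allocation
 * policy bits that have already been printed, so blk_flags_show() only
 * sees the boolean hctx flags.  Reading <debugfs>/block/<disk>/hctx0/flags
 * might therefore print something like (illustrative output only):
 *
 *	alloc_policy=FIFO SHOULD_MERGE|TAG_QUEUE_SHARED
 */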
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
	RQF_NAME(ELV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE] = "idle",
	[MQ_RQ_IN_FLIGHT] = "in_flight",
	[MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
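/*
 * A driver can append its own details to the per-request dump via the
 * ->show_rq() hook used above.  A minimal sketch (my_cmd and its ->opcode
 * field are made-up names for illustration):
 *
 *	static void my_show_rq(struct seq_file *m, struct request *rq)
 *	{
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		seq_printf(m, ", .opcode=0x%x", cmd->opcode);
 *	}
 *
 * Whatever the hook prints appears inside the "{...}" emitted for each
 * request.
 */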
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT] = "default",
	[HCTX_TYPE_READ] = "read",
	[HCTX_TYPE_POLL] = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start = ctx_##name##_rq_list_start,				\
	.next = ctx_##name##_rq_list_next,				\
	.stop = ctx_##name##_rq_list_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
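/*
 * Each CTX_RQ_SEQ_OPS() instantiation above expands to a start/next/stop
 * triple plus a seq_operations struct for one per-CPU software queue list:
 * e.g. CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ) generates
 * ctx_read_rq_list_seq_ops, which walks ctx->rq_lists[HCTX_TYPE_READ]
 * under ctx->lock.
 */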
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};
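/*
 * Note the pairing in the file_operations above: attributes that provide
 * .seq_ops are set up with seq_open() and must be torn down with
 * seq_release(), while plain .show attributes go through single_open() and
 * single_release().  blk_mq_debugfs_release() distinguishes the two cases
 * by checking attr->show.
 */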
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}
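/*
 * After blk_mq_debugfs_register() the debugfs tree looks roughly like this
 * (illustrative layout for a disk named sda with one hardware queue, an
 * I/O scheduler and one rq_qos policy registered):
 *
 *	/sys/kernel/debug/block/sda/
 *		state, pm_only, requeue_list, ...
 *		sched/		(elevator attributes)
 *		rqos/wbt/	(rq_qos policy attributes)
 *		hctx0/
 *			state, flags, dispatch, busy, tags, ...
 *			sched/	(per-hctx elevator attributes)
 *			cpu0/	(default_rq_list, read_rq_list, poll_rq_list)
 */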
static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later on and the directory/files will be created
	 * then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	case RQ_QOS_IOPRIO:
		return "ioprio";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->q->debugfs_mutex);

	if (!rqos->q->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent
	 * debugfs directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}