/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(QUEUED),
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(SYNCFULL),
	QUEUE_FLAG_NAME(ASYNCFULL),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BYPASS),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(STACKABLE),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}
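/*
 * Writing "run" to the "state" attribute kicks all hardware queues via
 * blk_mq_run_hw_queues() and writing "start" restarts stopped queues via
 * blk_mq_start_stopped_hw_queues(); anything else fails with -EINVAL.
 */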
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use either 'run' or 'start'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket+1]);
		seq_puts(m, "\n");
	}
	return 0;
}

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(TAG_WAITING),
	HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(ZONE_REPORT),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME
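/*
 * Per-bit names for the request flags dumped by __blk_mq_debugfs_rq_show().
 * The tables are indexed by bit number, so bits without an entry fall back
 * to a numeric print in blk_flags_show().
 */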
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(QUEUED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
};
#undef RQF_NAME

#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
static const char *const rqaf_name[] = {
	RQAF_NAME(COMPLETE),
	RQAF_NAME(STARTED),
	RQAF_NAME(POLL_SLEPT),
};
#undef RQAF_NAME

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_puts(m, ", .atomic_flags=");
	blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}
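/*
 * Dump the tag counters and sbitmaps of a tag set. Shared by the "tags"
 * and "sched_tags" attributes; callers serialize against tag set changes
 * with q->sysfs_lock.
 */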
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}
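/*
 * Like "io_poll" and "dispatched" above, the "queued" and "run" counters
 * below are reset to zero by writing anything to the attribute.
 */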
"%lu\n", hctx->queued); 487 return 0; 488 } 489 490 static ssize_t hctx_queued_write(void *data, const char __user *buf, 491 size_t count, loff_t *ppos) 492 { 493 struct blk_mq_hw_ctx *hctx = data; 494 495 hctx->queued = 0; 496 return count; 497 } 498 499 static int hctx_run_show(void *data, struct seq_file *m) 500 { 501 struct blk_mq_hw_ctx *hctx = data; 502 503 seq_printf(m, "%lu\n", hctx->run); 504 return 0; 505 } 506 507 static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count, 508 loff_t *ppos) 509 { 510 struct blk_mq_hw_ctx *hctx = data; 511 512 hctx->run = 0; 513 return count; 514 } 515 516 static int hctx_active_show(void *data, struct seq_file *m) 517 { 518 struct blk_mq_hw_ctx *hctx = data; 519 520 seq_printf(m, "%d\n", atomic_read(&hctx->nr_active)); 521 return 0; 522 } 523 524 static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos) 525 __acquires(&ctx->lock) 526 { 527 struct blk_mq_ctx *ctx = m->private; 528 529 spin_lock(&ctx->lock); 530 return seq_list_start(&ctx->rq_list, *pos); 531 } 532 533 static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos) 534 { 535 struct blk_mq_ctx *ctx = m->private; 536 537 return seq_list_next(v, &ctx->rq_list, pos); 538 } 539 540 static void ctx_rq_list_stop(struct seq_file *m, void *v) 541 __releases(&ctx->lock) 542 { 543 struct blk_mq_ctx *ctx = m->private; 544 545 spin_unlock(&ctx->lock); 546 } 547 548 static const struct seq_operations ctx_rq_list_seq_ops = { 549 .start = ctx_rq_list_start, 550 .next = ctx_rq_list_next, 551 .stop = ctx_rq_list_stop, 552 .show = blk_mq_debugfs_rq_show, 553 }; 554 static int ctx_dispatched_show(void *data, struct seq_file *m) 555 { 556 struct blk_mq_ctx *ctx = data; 557 558 seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]); 559 return 0; 560 } 561 562 static ssize_t ctx_dispatched_write(void *data, const char __user *buf, 563 size_t count, loff_t *ppos) 564 { 565 struct blk_mq_ctx *ctx = data; 566 567 ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0; 568 return count; 569 } 570 571 static int ctx_merged_show(void *data, struct seq_file *m) 572 { 573 struct blk_mq_ctx *ctx = data; 574 575 seq_printf(m, "%lu\n", ctx->rq_merged); 576 return 0; 577 } 578 579 static ssize_t ctx_merged_write(void *data, const char __user *buf, 580 size_t count, loff_t *ppos) 581 { 582 struct blk_mq_ctx *ctx = data; 583 584 ctx->rq_merged = 0; 585 return count; 586 } 587 588 static int ctx_completed_show(void *data, struct seq_file *m) 589 { 590 struct blk_mq_ctx *ctx = data; 591 592 seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]); 593 return 0; 594 } 595 596 static ssize_t ctx_completed_write(void *data, const char __user *buf, 597 size_t count, loff_t *ppos) 598 { 599 struct blk_mq_ctx *ctx = data; 600 601 ctx->rq_completed[0] = ctx->rq_completed[1] = 0; 602 return count; 603 } 604 605 static int blk_mq_debugfs_show(struct seq_file *m, void *v) 606 { 607 const struct blk_mq_debugfs_attr *attr = m->private; 608 void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private; 609 610 return attr->show(data, m); 611 } 612 613 static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf, 614 size_t count, loff_t *ppos) 615 { 616 struct seq_file *m = file->private_data; 617 const struct blk_mq_debugfs_attr *attr = m->private; 618 void *data = d_inode(file->f_path.dentry->d_parent)->i_private; 619 620 if (!attr->write) 621 return -EPERM; 622 623 return attr->write(data, buf, count, ppos); 624 } 625 626 static int 
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);
	else
		return seq_release(inode, file);
}

const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{"poll_stat", 0400, queue_poll_stat_show},
	{"state", 0600, queue_state_show, queue_state_write},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}
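/*
 * Create the debugfs directory for a request queue and populate it. The
 * directory is named after the disk, which is why this can only happen
 * once the queue has been registered to a gendisk.
 */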
int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}
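/*
 * Create the per-hctx "sched" directory for the current elevator. Returns
 * -ENOENT if the hardware queue has no debugfs directory yet, and succeeds
 * without creating anything if the elevator defines no hctx_debugfs_attrs.
 */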
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}