xref: /linux/block/blk-mq-debugfs.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/build_bug.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

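/*
 * Always empty: poll statistics are no longer collected here, but the
 * "poll_stat" file is kept (presumably so that the debugfs path stays
 * stable for existing tooling) and simply reads as an empty file.
 */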
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}

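/*
 * seq_file contract used throughout this file: ->start() acquires the lock
 * protecting the list and ->stop() releases it, so every ->show() call for
 * a walk of the requeue list below runs with q->requeue_lock held.
 */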
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

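/*
 * Render @flags as a '|'-separated list of flag names. For example
 * (illustrative): with bits 0 and 2 set and names "STOPPED" and
 * "SCHED_RESTART", the output is "STOPPED|SCHED_RESTART"; a set bit
 * without a name is printed as its bit number instead.
 */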
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(SQ_SCHED),
};
#undef QUEUE_FLAG_NAME
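
/*
 * QUEUE_FLAG_NAME(DYING) above expands to [QUEUE_FLAG_DYING] = "DYING",
 * i.e. the table is indexed by flag bit. The HCTX_STATE_NAME, HCTX_FLAG_NAME,
 * CMD_FLAG_NAME and RQF_NAME tables below follow the same pattern, each
 * paired with a BUILD_BUG_ON() that keeps the table in sync with its enum.
 */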

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	BUILD_BUG_ON(ARRAY_SIZE(blk_queue_flag_name) != QUEUE_FLAG_MAX);
	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed.  Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}
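
/*
 * Example usage (from userspace; the disk name is illustrative and debugfs
 * is assumed to be mounted at /sys/kernel/debug):
 *
 *   echo kick > /sys/kernel/debug/block/nvme0n1/state
 *
 * "run" runs the hardware queues, "start" restarts stopped hardware queues,
 * and "kick" kicks the requeue list.
 */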

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wplugs", 0400, queue_zone_wplugs_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_state_name) != BLK_MQ_S_MAX);
	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) !=
			BLK_MQ_F_ALLOC_POLICY_START_BIT);
	BUILD_BUG_ON(ARRAY_SIZE(alloc_policy_name) != BLK_TAG_ALLOC_MAX);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(POLLED),
	CMD_FLAG_NAME(ALLOC_CACHE),
	CMD_FLAG_NAME(SWAP),
	CMD_FLAG_NAME(DRV),
	CMD_FLAG_NAME(FS_PRIVATE),
	CMD_FLAG_NAME(ATOMIC),
	CMD_FLAG_NAME(NOUNMAP),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [__RQF_##name] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(DONTPREP),
	RQF_NAME(SCHED_TAGS),
	RQF_NAME(USE_SCHED),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_PLUGGING),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	BUILD_BUG_ON(ARRAY_SIZE(cmd_flag_name) != __REQ_NR_BITS);
	BUILD_BUG_ON(ARRAY_SIZE(rqf_name) != __RQF_BITS);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
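
/*
 * A request line produced above looks roughly like this (illustrative
 * values; %p is typically printed as a hashed pointer):
 *
 *   00000000deadbeef {.op=READ, .cmd_flags=SYNC,
 *   .rq_flags=STARTED|IO_STAT, .state=in_flight, .tag=45, .internal_tag=-1}
 */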

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   READ_ONCE(tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

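/*
 * The four *_show() helpers below take q->sysfs_lock before dereferencing
 * hctx->tags / hctx->sched_tags; the lock (presumably) serializes against
 * queue updates such as blk_mq_update_nr_hw_queues() that can free or
 * replace the tag sets. mutex_lock_interruptible() lets a reader stuck on
 * the mutex be interrupted by a signal instead of blocking forever.
 */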
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
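
/*
 * Each invocation above stamps out a complete seq_operations instance; e.g.
 * CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ) defines ctx_read_rq_list_seq_ops,
 * which walks ctx->rq_lists[HCTX_TYPE_READ] under ctx->lock and backs the
 * per-CPU "read_rq_list" file registered below.
 */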

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}
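
/*
 * Note how the two i_private fields are used: the directory inode holds the
 * object being inspected (queue, hctx, ctx, rqos), while each file's inode
 * holds its blk_mq_debugfs_attr. blk_mq_debugfs_show()/write()/open() above
 * recover the object via the file's d_parent, which is why every attribute
 * file must live directly inside the directory created for its object.
 */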

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}
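
/*
 * The resulting layout (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug, and <disk> is e.g. nvme0n1):
 *
 *   /sys/kernel/debug/block/<disk>/
 *       state, pm_only, requeue_list, ...
 *       sched/                 (if an I/O scheduler is attached)
 *       rqos/<policy>/         (one dir per registered rq-qos policy)
 *       hctx<N>/               (one dir per hardware queue)
 *           dispatch, tags, sched_tags, ... and cpu<M>/ per software ctx
 */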

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);

	if (!rqos->disk->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later, with the appropriate parent debugfs
	 * directory, from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}