xref: /linux/block/blk-mq-debugfs.c (revision ff9fbcafbaf13346c742c0d672a22f5ac20b9d92)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

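/*
 * The "poll_stat" attribute is kept as an empty placeholder: the show
 * callback below intentionally prints nothing.
 */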
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

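/*
 * Print the names of the bits set in @flags, separated by '|'.  Bits without
 * an entry in @flag_name are printed as their raw bit number.
 */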
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(SQ_SCHED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

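/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug and the disk is
 * named "nvme0n1"; adjust both for the system at hand):
 *
 *	echo kick > /sys/kernel/debug/block/nvme0n1/state
 *
 * runs blk_mq_kick_requeue_list() on that queue.
 */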
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed.  Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wplugs", 0400, queue_zone_wplugs_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(DONTPREP),
	RQF_NAME(SCHED_TAGS),
	RQF_NAME(USE_SCHED),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

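/*
 * Illustrative example of the line produced for one request (the hashed
 * pointer and the values are made up):
 *
 *	00000000abcdef12 {.op=READ, .cmd_flags=SYNC, .rq_flags=STARTED|IO_STAT, .state=in_flight, .tag=17, .internal_tag=-1}
 */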
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   READ_ONCE(tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

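/*
 * The tag dumps below take q->sysfs_lock so that hctx->tags and
 * hctx->sched_tags cannot be released or replaced while they are being
 * printed.
 */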
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

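/*
 * Generate the seq_file start/next/stop operations for walking the per-CPU
 * software queue list rq_lists[type] of a blk_mq_ctx under ctx->lock.
 */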
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

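/*
 * debugfs_create_files() stores the object being inspected (request_queue,
 * hctx, ctx or rq_qos) in the parent directory's inode->i_private and the
 * blk_mq_debugfs_attr in each file's inode->i_private; the helpers below
 * retrieve both from there.
 */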
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only, and in that
	 * case 'attr' is the same pointer as 'data'.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

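/*
 * Everything below hangs off q->debugfs_dir (normally
 * /sys/kernel/debug/block/<disk>): queue attributes at the top level,
 * "sched" for the elevator, "rqos/<name>" per rq-qos policy, and "hctx<N>"
 * per hardware queue with one "cpu<M>" subdirectory per mapped software
 * queue.
 */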
void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on, and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);

	if (!rqos->disk->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return; we
	 * will be called again later on from blk_register_queue(), once the
	 * parent debugfs directory exists.
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}