// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

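/* Dump one blk_rq_stat: the sample count plus mean/min/max of the samples. */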
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

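/*
 * q->poll_stat[] stores one read and one write bucket per I/O size class:
 * even slots hold reads, odd slots hold writes, and size class 'bucket'
 * covers requests of 1 << (9 + bucket) bytes (512, 1024, ...).
 */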
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read  (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ",  1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

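/*
 * seq_file iterator for the queue's requeue_list, walked with
 * q->requeue_lock held (irqs disabled) so entries cannot disappear under
 * the reader; ->stop() releases the lock.
 */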
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

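/*
 * Print a flags word as a '|'-separated list of symbolic names. Bits without
 * an entry in flag_name[] fall back to their bit number.
 */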
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

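/*
 * Writing "run", "start" or "kick" to the "state" attribute pokes the queue,
 * e.g. (assuming debugfs is mounted at the conventional /sys/kernel/debug):
 *
 *	echo kick > /sys/kernel/debug/block/<dev>/state
 */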
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

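/*
 * Writes to "write_hints" ignore their payload: any write simply clears
 * every q->write_hints[] slot.
 */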
static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

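/*
 * BLK_MQ_F_* flags are single bits, hence the ilog2() indexing. The tag
 * allocation policy bits are decoded separately in hctx_flags_show() below.
 */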
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

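/*
 * Format one request on a single line: opcode, command flags, rq_flags,
 * blk-mq state and tags, plus any driver-specific detail via ->show_rq().
 */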
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

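/*
 * Show every request in the tag set that is being serviced by this hardware
 * queue; requests mapped to other hardware queues are skipped.
 */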
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

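/*
 * Dump the tag set counters followed by the sbitmaps backing the normal and,
 * if present, the reserved tag space.
 */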
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

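/* Per-hctx polling statistics; writing any value resets all three counters. */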
static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

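/*
 * Generate the seq_file boilerplate for one per-CPU software queue request
 * list (one list per hctx type), walked under ctx->lock.
 */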
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

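/*
 * debugfs files carry no explicit private pointer, so blk_mq_debugfs_open()
 * finds the attribute in the file's inode (i_private) and the queue/hctx/ctx
 * pointer in the parent directory's inode; see debugfs_create_files().
 */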
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

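/*
 * Create one debugfs file per attribute under @parent, stashing @data in the
 * parent directory's inode so that the show/write handlers can retrieve it.
 */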
static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

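/*
 * Only the cached scheduler directory pointer is cleared here; the queue's
 * debugfs directory itself is removed recursively when the queue goes away.
 */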
void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

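/*
 * Register the debugfs directory and attributes for one rq_qos policy. The
 * shared "rqos" parent directory is created lazily on first use.
 */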
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}
986