// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "filetable.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "cancel.h"
#include "rsrc.h"

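/*
 * This file backs the ->show_fdinfo() hook of the io_uring file
 * descriptor: everything emitted below appears under
 * /proc/<pid>/fdinfo/<fd>, which is mainly useful for inspecting the
 * state of a stuck or misbehaving ring.
 */
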
#ifdef CONFIG_NET_RX_BUSY_POLL
static __cold void common_tracking_show_fdinfo(struct io_ring_ctx *ctx,
					       struct seq_file *m,
					       const char *tracking_strategy)
{
	seq_puts(m, "NAPI:\tenabled\n");
	seq_printf(m, "napi tracking:\t%s\n", tracking_strategy);
	seq_printf(m, "napi_busy_poll_dt:\t%llu\n", ctx->napi_busy_poll_dt);
	if (ctx->napi_prefer_busy_poll)
		seq_puts(m, "napi_prefer_busy_poll:\ttrue\n");
	else
		seq_puts(m, "napi_prefer_busy_poll:\tfalse\n");
}

static __cold void napi_show_fdinfo(struct io_ring_ctx *ctx,
				    struct seq_file *m)
{
	unsigned int mode = READ_ONCE(ctx->napi_track_mode);

	switch (mode) {
	case IO_URING_NAPI_TRACKING_INACTIVE:
		seq_puts(m, "NAPI:\tdisabled\n");
		break;
	case IO_URING_NAPI_TRACKING_DYNAMIC:
		common_tracking_show_fdinfo(ctx, m, "dynamic");
		break;
	case IO_URING_NAPI_TRACKING_STATIC:
		common_tracking_show_fdinfo(ctx, m, "static");
		break;
	default:
		seq_printf(m, "NAPI:\tunknown mode (%u)\n", mode);
	}
}
#else
static inline void napi_show_fdinfo(struct io_ring_ctx *ctx,
				    struct seq_file *m)
{
}
#endif
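
/*
 * With busy-poll support compiled in and dynamic tracking enabled, the
 * helpers above produce output roughly like this (values are
 * illustrative, not from a real trace):
 *
 *	NAPI:	enabled
 *	napi tracking:	dynamic
 *	napi_busy_poll_dt:	100000
 *	napi_prefer_busy_poll:	true
 */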

static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
	struct io_overflow_cqe *ocqe;
	struct io_rings *r = ctx->rings;
	struct rusage sq_usage;
	unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
	unsigned int sq_head = READ_ONCE(r->sq.head);
	unsigned int sq_tail = READ_ONCE(r->sq.tail);
	unsigned int cq_head = READ_ONCE(r->cq.head);
	unsigned int cq_tail = READ_ONCE(r->cq.tail);
	unsigned int sq_shift = 0;
	unsigned int sq_entries;
	int sq_pid = -1, sq_cpu = -1;
	u64 sq_total_time = 0, sq_work_time = 0;
	unsigned int i;

	if (ctx->flags & IORING_SETUP_SQE128)
		sq_shift = 1;

	/*
	 * We may get imprecise SQE and CQE info if the ring is actively
	 * running, since cached_sq_head and cached_cq_tail are read without
	 * uring_lock and sq_tail and cq_head are changed by userspace. But
	 * that's OK, since this info is usually consulted when the ring is
	 * stuck.
	 */
	seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
	seq_printf(m, "SqHead:\t%u\n", sq_head);
	seq_printf(m, "SqTail:\t%u\n", sq_tail);
	seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head));
	seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
	seq_printf(m, "CqHead:\t%u\n", cq_head);
	seq_printf(m, "CqTail:\t%u\n", cq_tail);
	seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail));
	seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
	for (i = 0; i < sq_entries; i++) {
		unsigned int entry = i + sq_head;
		struct io_uring_sqe *sqe;
		unsigned int sq_idx;

		/* without an SQ index array there is nothing to map through */
		if (ctx->flags & IORING_SETUP_NO_SQARRAY)
			break;
		sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
		/* skip any bogus index that userspace may have written */
		if (sq_idx > sq_mask)
			continue;
		sqe = &ctx->sq_sqes[sq_idx << sq_shift];
		seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
			      "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
			      "user_data:%llu",
			   sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
			   sqe->flags, (unsigned long long) sqe->off,
			   (unsigned long long) sqe->addr, sqe->rw_flags,
			   sqe->buf_index, sqe->user_data);
		if (sq_shift) {
			/* SQE128: dump the second half as raw u64 words */
			u64 *sqeb = (void *) (sqe + 1);
			int size = sizeof(struct io_uring_sqe) / sizeof(u64);
			int j;

			for (j = 0; j < size; j++) {
				seq_printf(m, ", e%d:0x%llx", j,
						(unsigned long long) *sqeb);
				sqeb++;
			}
		}
		seq_printf(m, "\n");
	}
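	/*
	 * A dumped SQE line renders along these lines (all values made up
	 * for illustration):
	 *
	 *	    3: opcode:READ, fd:5, flags:0, off:0, addr:0x55d0a0c000,
	 *	       rw_flags:0x0, buf_index:0 user_data:42
	 */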
	seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
	while (cq_head < cq_tail) {
		struct io_uring_cqe *cqe;
		bool cqe32 = false;

		cqe = &r->cqes[(cq_head & cq_mask)];
		if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32)
			cqe32 = true;
		seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
			   cq_head & cq_mask, cqe->user_data, cqe->res,
			   cqe->flags);
		if (cqe32)
			seq_printf(m, ", extra1:%llu, extra2:%llu",
					cqe->big_cqe[0], cqe->big_cqe[1]);
		seq_printf(m, "\n");
		cq_head++;
		/* a 32-byte CQE occupies two slots in the CQ ring */
		if (cqe32)
			cq_head++;
	}
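	/*
	 * Note on the loop above: with IORING_SETUP_CQE32 every CQE is 32
	 * bytes, while IORING_CQE_F_32 marks an individual 32-byte CQE on a
	 * ring that otherwise uses 16-byte entries. In both cases the second
	 * cq_head increment skips the slot holding the CQE's upper half.
	 */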

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct io_sq_data *sq = ctx->sq_data;
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(sq->thread);
		/*
		 * sq->thread might be NULL if we raced with the sqpoll
		 * thread termination.
		 */
		if (tsk) {
			get_task_struct(tsk);
			rcu_read_unlock();
			getrusage(tsk, RUSAGE_SELF, &sq_usage);
			put_task_struct(tsk);
			sq_pid = sq->task_pid;
			sq_cpu = sq->sq_cpu;
			sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000
					 + sq_usage.ru_stime.tv_usec);
			sq_work_time = sq->work_time;
		} else {
			rcu_read_unlock();
		}
	}
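
	/*
	 * sq_total_time is the sqpoll thread's system CPU time derived from
	 * getrusage() in microseconds; sq_work_time is the busy time the
	 * sqpoll thread accounts for itself (see sqpoll.c), which appears to
	 * use the same unit.
	 */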
	seq_printf(m, "SqThread:\t%d\n", sq_pid);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
	seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time);
	seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time);
	seq_printf(m, "UserFiles:\t%u\n", ctx->file_table.data.nr);
	for (i = 0; i < ctx->file_table.data.nr; i++) {
		struct file *f = NULL;

		if (ctx->file_table.data.nodes[i])
			f = io_slot_file(ctx->file_table.data.nodes[i]);
		if (f) {
			seq_printf(m, "%5u: ", i);
			seq_file_path(m, f, " \t\n\\");
			seq_puts(m, "\n");
		}
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
	for (i = 0; i < ctx->buf_table.nr; i++) {
		struct io_mapped_ubuf *buf = NULL;

		if (ctx->buf_table.nodes[i])
			buf = ctx->buf_table.nodes[i]->buf;
		if (buf)
			seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}

	seq_puts(m, "PollList:\n");
	for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
		struct io_kiocb *req;

		hlist_for_each_entry(req, &hb->list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					task_work_pending(req->tctx->task));
	}

	seq_puts(m, "CqOverflowList:\n");
	spin_lock(&ctx->completion_lock);
	list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
		struct io_uring_cqe *cqe = &ocqe->cqe;

		seq_printf(m, "  user_data=%llu, res=%d, flags=%x\n",
			   cqe->user_data, cqe->res, cqe->flags);
	}
	spin_unlock(&ctx->completion_lock);
	napi_show_fdinfo(ctx, m);
}
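
/*
 * Taken together, __io_uring_show_fdinfo() emits something along these
 * lines for an idle SQPOLL ring (all values below are made up for
 * illustration; the NAPI line needs CONFIG_NET_RX_BUSY_POLL):
 *
 *	SqMask:	0xff
 *	SqHead:	10
 *	SqTail:	10
 *	CachedSqHead:	10
 *	CqMask:	0x1ff
 *	CqHead:	10
 *	CqTail:	10
 *	CachedCqTail:	10
 *	SQEs:	0
 *	CQEs:	0
 *	SqThread:	1234
 *	SqThreadCpu:	2
 *	SqTotalTime:	500000
 *	SqWorkTime:	100000
 *	UserFiles:	0
 *	UserBufs:	0
 *	PollList:
 *	CqOverflowList:
 *	NAPI:	disabled
 */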

/*
 * The caller already holds a reference to the file, so we don't need to
 * take an extra one here.
 */
__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	/*
	 * Avoid an ABBA deadlock between the seq_file lock and the io_uring
	 * mutex: the fdinfo path acquires them in the opposite order from
	 * normal use, so only dump state if the mutex is immediately
	 * available.
	 */
	if (mutex_trylock(&ctx->uring_lock)) {
		__io_uring_show_fdinfo(ctx, m);
		mutex_unlock(&ctx->uring_lock);
	}
}
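
/*
 * Usage note: the state dump can be read with a plain file read, e.g.
 * (shell, with a made-up pid and fd):
 *
 *	cat /proc/1234/fdinfo/4
 *
 * If the trylock above fails because the ring is busy, the
 * io_uring-specific fields are simply absent from that read; the generic
 * fdinfo fields still come from procfs itself.
 */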
238