// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "cancel.h"
#include "rsrc.h"

#ifdef CONFIG_PROC_FS
static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
		const struct cred *cred)
{
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
				    from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	seq_put_hex_ll(m, NULL, cap.val, 16);
	seq_putc(m, '\n');
	return 0;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static __cold void common_tracking_show_fdinfo(struct io_ring_ctx *ctx,
					       struct seq_file *m,
					       const char *tracking_strategy)
{
	seq_puts(m, "NAPI:\tenabled\n");
	seq_printf(m, "napi tracking:\t%s\n", tracking_strategy);
	seq_printf(m, "napi_busy_poll_dt:\t%llu\n", ctx->napi_busy_poll_dt);
	if (ctx->napi_prefer_busy_poll)
		seq_puts(m, "napi_prefer_busy_poll:\ttrue\n");
	else
		seq_puts(m, "napi_prefer_busy_poll:\tfalse\n");
}

static __cold void napi_show_fdinfo(struct io_ring_ctx *ctx,
				    struct seq_file *m)
{
	unsigned int mode = READ_ONCE(ctx->napi_track_mode);

	switch (mode) {
	case IO_URING_NAPI_TRACKING_INACTIVE:
		seq_puts(m, "NAPI:\tdisabled\n");
		break;
	case IO_URING_NAPI_TRACKING_DYNAMIC:
		common_tracking_show_fdinfo(ctx, m, "dynamic");
		break;
	case IO_URING_NAPI_TRACKING_STATIC:
		common_tracking_show_fdinfo(ctx, m, "static");
		break;
	default:
		seq_printf(m, "NAPI:\tunknown mode (%u)\n", mode);
	}
}
#else
static inline void napi_show_fdinfo(struct io_ring_ctx *ctx,
				    struct seq_file *m)
{
}
#endif

/*
 * Caller holds a reference to the file already, we don't need to do
 * anything else to get an extra reference.
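 *
 * This is the ->show_fdinfo handler for io_uring files; everything emitted
 * here appears under /proc/<pid>/fdinfo/<fd> for the ring fd.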
 */
__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;
	struct io_overflow_cqe *ocqe;
	struct io_rings *r = ctx->rings;
	struct rusage sq_usage;
	unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
	unsigned int sq_head = READ_ONCE(r->sq.head);
	unsigned int sq_tail = READ_ONCE(r->sq.tail);
	unsigned int cq_head = READ_ONCE(r->cq.head);
	unsigned int cq_tail = READ_ONCE(r->cq.tail);
	unsigned int cq_shift = 0;
	unsigned int sq_shift = 0;
	unsigned int sq_entries, cq_entries;
	int sq_pid = -1, sq_cpu = -1;
	u64 sq_total_time = 0, sq_work_time = 0;
	bool has_lock;
	unsigned int i;

	if (ctx->flags & IORING_SETUP_CQE32)
		cq_shift = 1;
	if (ctx->flags & IORING_SETUP_SQE128)
		sq_shift = 1;

	/*
	 * We may get imprecise sqe and cqe info if the ring is actively
	 * running, since we read cached_sq_head and cached_cq_tail without
	 * uring_lock, and sq_tail and cq_head are changed by userspace. But
	 * that's OK, since this info is usually only consulted when the ring
	 * is stuck.
	 */
	seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
	seq_printf(m, "SqHead:\t%u\n", sq_head);
	seq_printf(m, "SqTail:\t%u\n", sq_tail);
	seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
	seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
	seq_printf(m, "CqHead:\t%u\n", cq_head);
	seq_printf(m, "CqTail:\t%u\n", cq_tail);
	seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
	seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
	for (i = 0; i < sq_entries; i++) {
		unsigned int entry = i + sq_head;
		struct io_uring_sqe *sqe;
		unsigned int sq_idx;

		if (ctx->flags & IORING_SETUP_NO_SQARRAY)
			break;
		sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
		if (sq_idx > sq_mask)
			continue;
		sqe = &ctx->sq_sqes[sq_idx << sq_shift];
		seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
			      "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
			      "user_data:%llu",
			   sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
			   sqe->flags, (unsigned long long) sqe->off,
			   (unsigned long long) sqe->addr, sqe->rw_flags,
			   sqe->buf_index, sqe->user_data);
		if (sq_shift) {
			u64 *sqeb = (void *) (sqe + 1);
			int size = sizeof(struct io_uring_sqe) / sizeof(u64);
			int j;

			for (j = 0; j < size; j++) {
				seq_printf(m, ", e%d:0x%llx", j,
					   (unsigned long long) *sqeb);
				sqeb++;
			}
		}
		seq_printf(m, "\n");
	}
	seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
	cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
	for (i = 0; i < cq_entries; i++) {
		unsigned int entry = i + cq_head;
		struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];

		seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
			   entry & cq_mask, cqe->user_data, cqe->res,
			   cqe->flags);
		if (cq_shift)
			seq_printf(m, ", extra1:%llu, extra2:%llu",
				   cqe->big_cqe[0], cqe->big_cqe[1]);
		seq_printf(m, "\n");
	}

	/*
	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
	 * since the fdinfo path grabs them in the opposite order from normal
	 * use cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
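	 *
	 * The file table, buffer table, personalities and cancel hash dumped
	 * below are all gated on has_lock for that reason.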
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
		struct io_sq_data *sq = ctx->sq_data;

		/*
		 * sq->thread might be NULL if we raced with the sqpoll
		 * thread termination.
		 */
		if (sq->thread) {
			sq_pid = sq->task_pid;
			sq_cpu = sq->sq_cpu;
			getrusage(sq->thread, RUSAGE_SELF, &sq_usage);
			sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000
					 + sq_usage.ru_stime.tv_usec);
			sq_work_time = sq->work_time;
		}
	}

	seq_printf(m, "SqThread:\t%d\n", sq_pid);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
	seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time);
	seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time);
	seq_printf(m, "UserFiles:\t%u\n", ctx->file_table.data.nr);
	for (i = 0; has_lock && i < ctx->file_table.data.nr; i++) {
		struct file *f = NULL;

		if (ctx->file_table.data.nodes[i])
			f = io_slot_file(ctx->file_table.data.nodes[i]);
		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
	for (i = 0; has_lock && i < ctx->buf_table.nr; i++) {
		struct io_mapped_ubuf *buf = NULL;

		if (ctx->buf_table.nodes[i])
			buf = ctx->buf_table.nodes[i]->buf;
		if (buf)
			seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	if (has_lock && !xa_empty(&ctx->personalities)) {
		unsigned long index;
		const struct cred *cred;

		seq_printf(m, "Personalities:\n");
		xa_for_each(&ctx->personalities, index, cred)
			io_uring_show_cred(m, index, cred);
	}

	seq_puts(m, "PollList:\n");
	for (i = 0; has_lock && i < (1U << ctx->cancel_table.hash_bits); i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
		struct io_kiocb *req;

		hlist_for_each_entry(req, &hb->list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
				   task_work_pending(req->tctx->task));
	}

	if (has_lock)
		mutex_unlock(&ctx->uring_lock);

	seq_puts(m, "CqOverflowList:\n");
	spin_lock(&ctx->completion_lock);
	list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
		struct io_uring_cqe *cqe = &ocqe->cqe;

		seq_printf(m, "  user_data=%llu, res=%d, flags=%x\n",
			   cqe->user_data, cqe->res, cqe->flags);
	}
	spin_unlock(&ctx->completion_lock);
	napi_show_fdinfo(ctx, m);
}
#endif