// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "cancel.h"
#include "rsrc.h"

#ifdef CONFIG_PROC_FS
static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
		const struct cred *cred)
{
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
					from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	seq_put_hex_ll(m, NULL, cap.val, 16);
	seq_putc(m, '\n');
	return 0;
}

/*
 * Caller holds a reference to the file already, we don't need to do
 * anything else to get an extra reference.
 */
__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;
	struct io_sq_data *sq = NULL;
	struct io_overflow_cqe *ocqe;
	struct io_rings *r = ctx->rings;
	unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
	unsigned int sq_head = READ_ONCE(r->sq.head);
	unsigned int sq_tail = READ_ONCE(r->sq.tail);
	unsigned int cq_head = READ_ONCE(r->cq.head);
	unsigned int cq_tail = READ_ONCE(r->cq.tail);
	unsigned int cq_shift = 0;
	unsigned int sq_shift = 0;
	unsigned int sq_entries, cq_entries;
	bool has_lock;
	unsigned int i;

	/* big CQEs/SQEs double the entry stride in the rings */
	if (ctx->flags & IORING_SETUP_CQE32)
		cq_shift = 1;
	if (ctx->flags & IORING_SETUP_SQE128)
		sq_shift = 1;

	/*
	 * We may get imprecise sqe and cqe info if the ring is actively
	 * running, since we read cached_sq_head and cached_cq_tail without
	 * uring_lock, and sq_tail and cq_head are changed by userspace. But
	 * that's OK, since this info is usually only consulted when the ring
	 * is stuck.
	 */
	seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
	seq_printf(m, "SqHead:\t%u\n", sq_head);
	seq_printf(m, "SqTail:\t%u\n", sq_tail);
	seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
	seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
	seq_printf(m, "CqHead:\t%u\n", cq_head);
	seq_printf(m, "CqTail:\t%u\n", cq_tail);
	seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
	seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
	/* clamp to the ring size, the tail is under userspace control */
	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
	for (i = 0; i < sq_entries; i++) {
		unsigned int entry = i + sq_head;
		struct io_uring_sqe *sqe;
		unsigned int sq_idx;

		if (ctx->flags & IORING_SETUP_NO_SQARRAY)
			break;
		/* sq_array is written by userspace, skip bogus indices */
		sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
		if (sq_idx > sq_mask)
			continue;
		sqe = &ctx->sq_sqes[sq_idx << sq_shift];
		seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
			      "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
			      "user_data:%llu",
			   sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
			   sqe->flags, (unsigned long long) sqe->off,
			   (unsigned long long) sqe->addr, sqe->rw_flags,
			   sqe->buf_index, sqe->user_data);
		if (sq_shift) {
			u64 *sqeb = (void *) (sqe + 1);
			int size = sizeof(struct io_uring_sqe) / sizeof(u64);
			int j;

			/* dump the second half of a 128-byte SQE as raw u64s */
			for (j = 0; j < size; j++) {
				seq_printf(m, ", e%d:0x%llx", j,
						(unsigned long long) *sqeb);
				sqeb++;
			}
		}
		seq_printf(m, "\n");
	}
	seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
	cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
	for (i = 0; i < cq_entries; i++) {
		unsigned int entry = i + cq_head;
		struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];

		seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
			   entry & cq_mask, cqe->user_data, cqe->res,
			   cqe->flags);
		if (cq_shift)
			seq_printf(m, ", extra1:%llu, extra2:%llu",
				   cqe->big_cqe[0], cqe->big_cqe[1]);
		seq_printf(m, "\n");
	}

	/*
	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
	 * since the fdinfo case grabs it in the opposite direction of normal
	 * use cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
		sq = ctx->sq_data;
		if (!sq->thread)
			sq = NULL;
	}

	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
		struct file *f = io_file_from_index(&ctx->file_table, i);

		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = ctx->user_bufs[i];
		unsigned int len = buf->ubuf_end - buf->ubuf;

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
	}
	if (has_lock && !xa_empty(&ctx->personalities)) {
		unsigned long index;
		const struct cred *cred;

		seq_printf(m, "Personalities:\n");
		xa_for_each(&ctx->personalities, index, cred)
			io_uring_show_cred(m, index, cred);
	}

	seq_puts(m, "PollList:\n");
	for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
		struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
		struct io_kiocb *req;

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					task_work_pending(req->task));
		spin_unlock(&hb->lock);

		/* the locked table needs uring_lock, skip it if we don't hold it */
		if (!has_lock)
			continue;
		hlist_for_each_entry(req, &hbl->list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					task_work_pending(req->task));
	}

	if (has_lock)
		mutex_unlock(&ctx->uring_lock);

	seq_puts(m, "CqOverflowList:\n");
	spin_lock(&ctx->completion_lock);
	list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
		struct io_uring_cqe *cqe = &ocqe->cqe;

		seq_printf(m, "  user_data=%llu, res=%d, flags=%x\n",
			   cqe->user_data, cqe->res, cqe->flags);
	}

	spin_unlock(&ctx->completion_lock);
}
#endif
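
/*
 * Illustrative only, not part of the kernel build: the output generated
 * above is read through procfs as /proc/<pid>/fdinfo/<fd> for an io_uring
 * fd. A minimal userspace sketch, assuming a kernel with io_uring support
 * and using only the raw io_uring_setup() syscall, might look like:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	int main(void)
 *	{
 *		struct io_uring_params p;
 *		char path[64], buf[4096];
 *		ssize_t n;
 *		int ring_fd, fd;
 *
 *		// create a small ring; its fdinfo is served by
 *		// io_uring_show_fdinfo() above
 *		memset(&p, 0, sizeof(p));
 *		ring_fd = syscall(__NR_io_uring_setup, 4, &p);
 *		if (ring_fd < 0)
 *			return 1;
 *
 *		// dump /proc/self/fdinfo/<ring_fd> to stdout
 *		snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", ring_fd);
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *		close(ring_fd);
 *		return 0;
 *	}
 */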