// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
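
/*
 * Illustrative sketch (editorial addition, not upstream code): one way an
 * application can follow the ordering rules above. The field names below
 * (khead, ktail, kring_mask, cqes, sqes) are assumptions modelled on
 * liburing's mmap'ed ring layout.
 *
 *	// CQ side: acquire-load the tail the kernel publishes, then
 *	// release-store the new head once the entries have been read.
 *	unsigned head = *cq->khead;
 *	unsigned tail = smp_load_acquire(cq->ktail);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *		// ... consume *cqe ...
 *		head++;
 *	}
 *	smp_store_release(cq->khead, head);
 *
 *	// SQ side: write the SQE first, then publish the new tail with a
 *	// release store so the kernel's acquire load in io_get_sqring()
 *	// only observes fully written entries.
 *	unsigned sq_tail = *sq->ktail;
 *	sq->sqes[sq_tail & *sq->kring_mask] = sqe;
 *	smp_store_release(sq->ktail, sq_tail + 1);
 */
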
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/task_work.h>
#include <linux/io_uring.h>
#include <linux/io_uring/cmd.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io-wq.h"

#include "io_uring.h"
#include "opdef.h"
#include "refs.h"
#include "tctx.h"
#include "register.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "kbuf.h"
#include "rsrc.h"
#include "cancel.h"
#include "net.h"
#include "notif.h"
#include "waitid.h"
#include "futex.h"
#include "napi.h"
#include "uring_cmd.h"
#include "msg_ring.h"
#include "memmap.h"

#include "timeout.h"
#include "poll.h"
#include "rw.h"
#include "alloc_cache.h"
#include "eventfd.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			    REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
			    REQ_F_ASYNC_DATA)

#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
				 IO_REQ_CLEAN_FLAGS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

#define IO_COMPL_BATCH			32
#define IO_REQ_ALLOC_BATCH		8

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

/*
 * No waiters. It's larger than any valid value of the tw counter
 * so that tests against ->cq_wait_nr would fail and skip wake_up().
 */
#define IO_CQ_WAKE_INIT		(-1U)
/* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
#define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)
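
/*
 * Editorial note, not upstream text: with 32-bit unsigned arithmetic,
 * IO_CQ_WAKE_INIT is 0xffffffff and IO_CQ_WAKE_FORCE is 0x7fffffff, both
 * well above IORING_MAX_CQ_ENTRIES (65536). A real waiter therefore never
 * stores a ->cq_wait_nr larger than IO_CQ_WAKE_FORCE, so a forced wake
 * (nr_tw == IO_CQ_WAKE_FORCE) always satisfies nr_tw >= nr_wait in
 * io_req_local_work_add(); with no waiters, ->cq_wait_nr holds the INIT
 * value, which exceeds any possible nr_tw and the wakeup is skipped.
 */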

static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);

static void io_queue_sqe(struct io_kiocb *req);

struct kmem_cache *req_cachep;
static struct workqueue_struct *iou_wq __ro_after_init;

static int __read_mostly sysctl_io_uring_disabled;
static int __read_mostly sysctl_io_uring_group = -1;

#ifdef CONFIG_SYSCTL
static struct ctl_table kernel_io_uring_disabled_table[] = {
	{
		.procname	= "io_uring_disabled",
		.data		= &sysctl_io_uring_disabled,
		.maxlen		= sizeof(sysctl_io_uring_disabled),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "io_uring_group",
		.data		= &sysctl_io_uring_group,
		.maxlen		= sizeof(gid_t),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};
#endif

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
{
	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
}

static bool io_match_linked(struct io_kiocb *head)
{
	struct io_kiocb *req;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/*
 * As io_match_task() but protected against racing with linked timeouts.
 * User must not hold timeout_lock.
 */
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all)
{
	bool matched;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	if (head->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = head->ctx;

		/* protect against races with linked timeouts */
		spin_lock_irq(&ctx->timeout_lock);
		matched = io_match_linked(head);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		matched = io_match_linked(head);
	}
	return matched;
}

static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
	req_set_fail(req);
	io_req_set_res(req, res, 0);
}

static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
}

static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static __cold void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;
	struct io_tw_state ts = {};

	percpu_ref_get(&ctx->refs);
	mutex_lock(&ctx->uring_lock);
	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
		req->io_task_work.func(req, &ts);
	io_submit_flush_completions(ctx);
	mutex_unlock(&ctx->uring_lock);
	percpu_ref_put(&ctx->refs);
}

static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
	unsigned hash_buckets = 1U << bits;
	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);

	table->hbs = kmalloc(hash_size, GFP_KERNEL);
	if (!table->hbs)
		return -ENOMEM;

	table->hash_bits = bits;
	init_hash_table(table, hash_buckets);
	return 0;
}

static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;
	bool ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	xa_init(&ctx->io_bl_xa);

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread, but
	 * don't keep too many buckets to not overconsume memory.
	 */
	hash_bits = ilog2(p->cq_entries) - 5;
	hash_bits = clamp(hash_bits, 1, 8);
	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
		goto err;
	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
		goto err;
	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    0, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	INIT_LIST_HEAD(&ctx->io_buffers_cache);
	ret = io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
				  sizeof(struct io_rsrc_node));
	ret |= io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
				   sizeof(struct async_poll));
	ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
				   sizeof(struct io_async_msghdr));
	ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
				   sizeof(struct io_async_rw));
	ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
				   sizeof(struct uring_cache));
	spin_lock_init(&ctx->msg_lock);
	ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
				   sizeof(struct io_kiocb));
	ret |= io_futex_cache_init(ctx);
	if (ret)
		goto err;
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	init_waitqueue_head(&ctx->poll_wq);
	init_waitqueue_head(&ctx->rsrc_quiesce_wq);
	spin_lock_init(&ctx->completion_lock);
	spin_lock_init(&ctx->timeout_lock);
	INIT_WQ_LIST(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->io_buffers_comp);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	INIT_LIST_HEAD(&ctx->ltimeout_list);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	init_llist_head(&ctx->work_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	ctx->submit_state.free_list.next = NULL;
	INIT_HLIST_HEAD(&ctx->waitid_list);
#ifdef CONFIG_FUTEX
	INIT_HLIST_HEAD(&ctx->futex_list);
#endif
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
	INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
	io_napi_init(ctx);

	return ctx;
err:
	io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
	io_alloc_cache_free(&ctx->apoll_cache, kfree);
	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
	io_alloc_cache_free(&ctx->uring_cache, kfree);
	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
	io_futex_cache_free(ctx);
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->cancel_table_locked.hbs);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
	return NULL;
}
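
/*
 * Editorial worked example for the cancel hash sizing in io_ring_ctx_alloc()
 * above (not upstream text): p->cq_entries == 4096 gives
 * ilog2(4096) - 5 == 7, i.e. 128 buckets per table, or 4096 / 128 == 32
 * entries per bucket with a completely full, uniformly hashed CQ. The
 * clamp() bounds this at 2 buckets for tiny rings and 256 for huge ones.
 */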

static void io_account_cq_overflow(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
	ctx->cq_extra--;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}

static void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		spin_lock(&req->ctx->completion_lock);
		io_kbuf_drop(req);
		spin_unlock(&req->ctx->completion_lock);
	}

	if (req->flags & REQ_F_NEED_CLEANUP) {
		const struct io_cold_def *def = &io_cold_defs[req->opcode];

		if (def->cleanup)
			def->cleanup(req);
	}
	if ((req->flags & REQ_F_POLLED) && req->apoll) {
		kfree(req->apoll->double_poll);
		kfree(req->apoll);
		req->apoll = NULL;
	}
	if (req->flags & REQ_F_INFLIGHT) {
		struct io_uring_task *tctx = req->task->io_uring;

		atomic_dec(&tctx->inflight_tracked);
	}
	if (req->flags & REQ_F_CREDS)
		put_cred(req->creds);
	if (req->flags & REQ_F_ASYNC_DATA) {
		kfree(req->async_data);
		req->async_data = NULL;
	}
	req->flags &= ~IO_REQ_CLEAN_FLAGS;
}

static inline void io_req_track_inflight(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;
		atomic_inc(&req->task->io_uring->inflight_tracked);
	}
}

static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{
	if (WARN_ON_ONCE(!req->link))
		return NULL;

	req->flags &= ~REQ_F_ARM_LTIMEOUT;
	req->flags |= REQ_F_LINK_TIMEOUT;

	/* linked timeouts should have two refs once prep'ed */
	io_req_set_refcount(req);
	__io_req_set_refcount(req->link, 2);
	return req->link;
}

static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
		return NULL;
	return __io_prep_linked_timeout(req);
}

static noinline void __io_arm_ltimeout(struct io_kiocb *req)
{
	io_queue_linked_timeout(__io_prep_linked_timeout(req));
}

static inline void io_arm_ltimeout(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
		__io_arm_ltimeout(req);
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_CREDS)) {
		req->flags |= REQ_F_CREDS;
		req->creds = get_current_cred();
	}

	req->work.list.next = NULL;
	atomic_set(&req->work.flags, 0);
	if (req->flags & REQ_F_FORCE_ASYNC)
		atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags);

	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(req->file);

	if (req->file && (req->flags & REQ_F_ISREG)) {
		bool should_hash = def->hash_reg_file;

		/* don't serialize this request if the fs doesn't need it */
		if (should_hash && (req->file->f_flags & O_DIRECT) &&
		    (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE))
			should_hash = false;
		if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags);
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
	}
}

static void io_queue_iowq(struct io_kiocb *req)
{
	struct io_kiocb *link = io_prep_linked_timeout(req);
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);
	BUG_ON(!tctx->io_wq);

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);

	/*
	 * Not expected to happen, but if we do have a bug where this _can_
	 * happen, catch it here and ensure the request is marked as
	 * canceled. That will make io-wq go through the usual work cancel
	 * procedure rather than attempt to run this request (or create a new
	 * worker for it).
	 */
	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
		atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);

	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}

static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->defer_list)) {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	}
}

void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (ctx->poll_activated)
		io_poll_wq_wake(ctx);
	if (ctx->off_timeout_used)
		io_flush_timeouts(ctx);
	if (ctx->drain_active) {
		spin_lock(&ctx->completion_lock);
		io_queue_deferred(ctx);
		spin_unlock(&ctx->completion_lock);
	}
	if (ctx->has_evfd)
		io_eventfd_flush_signal(ctx);
}

static inline void __io_cq_lock(struct io_ring_ctx *ctx)
{
	if (!ctx->lockless_cq)
		spin_lock(&ctx->completion_lock);
}

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
{
	io_commit_cqring(ctx);
	if (!ctx->task_complete) {
		if (!ctx->lockless_cq)
			spin_unlock(&ctx->completion_lock);
		/* IOPOLL rings only need to wake up if it's also SQPOLL */
		if (!ctx->syscall_iopoll)
			io_cqring_wake(ctx);
	}
	io_commit_cqring_flush(ctx);
}

static void io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_wake(ctx);
	io_commit_cqring_flush(ctx);
}

static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
{
	size_t cqe_size = sizeof(struct io_uring_cqe);

	lockdep_assert_held(&ctx->uring_lock);

	/* don't abort if we're dying, entries must get freed */
	if (!dying && __io_cqring_events(ctx) == ctx->cq_entries)
		return;

	if (ctx->flags & IORING_SETUP_CQE32)
		cqe_size <<= 1;

	io_cq_lock(ctx);
	while (!list_empty(&ctx->cq_overflow_list)) {
		struct io_uring_cqe *cqe;
		struct io_overflow_cqe *ocqe;

		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);

		if (!dying) {
			if (!io_get_cqe_overflow(ctx, &cqe, true))
				break;
			memcpy(cqe, &ocqe->cqe, cqe_size);
		}
		list_del(&ocqe->list);
		kfree(ocqe);
	}

	if (list_empty(&ctx->cq_overflow_list)) {
		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}
	io_cq_unlock_post(ctx);
}

static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
{
	if (ctx->rings)
		__io_cqring_overflow_flush(ctx, true);
}

static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	__io_cqring_overflow_flush(ctx, false);
	mutex_unlock(&ctx->uring_lock);
}

/* can be called by any task */
static void io_put_task_remote(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, 1);
	if (unlikely(atomic_read(&tctx->in_cancel)))
		wake_up(&tctx->wait);
	put_task_struct(task);
}

/* used by a task to put its own references */
static void io_put_task_local(struct task_struct *task)
{
	task->io_uring->cached_refs++;
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task)
{
	if (likely(task == current))
		io_put_task_local(task);
	else
		io_put_task_remote(task);
}

void io_task_refs_refill(struct io_uring_task *tctx)
{
	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

	percpu_counter_add(&tctx->inflight, refill);
	refcount_add(refill, &current->usage);
	tctx->cached_refs += refill;
}

static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
	unsigned int refs = tctx->cached_refs;

	if (refs) {
		tctx->cached_refs = 0;
		percpu_counter_sub(&tctx->inflight, refs);
		put_task_struct_many(task, refs);
	}
}

static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
				     s32 res, u32 cflags, u64 extra1, u64 extra2)
{
	struct io_overflow_cqe *ocqe;
	size_t ocq_size = sizeof(struct io_overflow_cqe);
	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);

	lockdep_assert_held(&ctx->completion_lock);

	if (is_cqe32)
		ocq_size += sizeof(struct io_uring_cqe);

	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
	if (!ocqe) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * or cannot allocate an overflow entry, then we need to drop it
		 * on the floor.
		 */
		io_account_cq_overflow(ctx);
		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
		return false;
	}
	if (list_empty(&ctx->cq_overflow_list)) {
		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);

	}
	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	if (is_cqe32) {
		ocqe->cqe.big_cqe[0] = extra1;
		ocqe->cqe.big_cqe[1] = extra2;
	}
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

static void io_req_cqe_overflow(struct io_kiocb *req)
{
	io_cqring_event_overflow(req->ctx, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				req->big_cqe.extra1, req->big_cqe.extra2);
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}

/*
 * writes to the cq entry need to come after reading head; the
 * control dependency is enough as we're using WRITE_ONCE to
 * fill the cq entry
 */
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
{
	struct io_rings *rings = ctx->rings;
	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
	unsigned int free, queued, len;

	/*
	 * Posting into the CQ when there are pending overflowed CQEs may break
	 * ordering guarantees, which will affect links, F_MORE users and more.
	 * Force overflow the completion.
	 */
	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
		return false;

	/* userspace may cheat modifying the tail, be safe and do min */
	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
	free = ctx->cq_entries - queued;
	/* we need a contiguous range, limit based on the current array offset */
	len = min(free, ctx->cq_entries - off);
	if (!len)
		return false;

	if (ctx->flags & IORING_SETUP_CQE32) {
		off <<= 1;
		len <<= 1;
	}

	ctx->cqe_cached = &rings->cqes[off];
	ctx->cqe_sentinel = ctx->cqe_cached + len;
	return true;
}

static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
			    u32 cflags)
{
	struct io_uring_cqe *cqe;

	ctx->cq_extra++;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (likely(io_get_cqe(ctx, &cqe))) {
		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);

		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);

		if (ctx->flags & IORING_SETUP_CQE32) {
			WRITE_ONCE(cqe->big_cqe[0], 0);
			WRITE_ONCE(cqe->big_cqe[1], 0);
		}
		return true;
	}
	return false;
}

static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
			      u32 cflags)
{
	bool filled;

	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
	if (!filled)
		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);

	return filled;
}

bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
	bool filled;

	io_cq_lock(ctx);
	filled = __io_post_aux_cqe(ctx, user_data, res, cflags);
	io_cq_unlock_post(ctx);
	return filled;
}
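
/*
 * Editorial worked example for the contiguous-range math in
 * io_cqe_cache_refill() above (not upstream text): with cq_entries == 8,
 * cached_cq_tail == 6 and 5 CQEs still queued, off == 6, free == 3 and
 * len == min(3, 8 - 6) == 2, so the cache covers cqes[6] and cqes[7] only.
 * The remaining free slot past the wrap-around is handed out by the next
 * refill, which starts again at off == 0.
 */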

/*
 * Must be called from inline task_work so we know a flush will happen later,
 * and obviously with ctx->uring_lock held (tw always has that).
 */
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
	if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
		spin_lock(&ctx->completion_lock);
		io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
		spin_unlock(&ctx->completion_lock);
	}
	ctx->submit_state.cq_flush = true;
}

/*
 * A helper for multishot requests posting additional CQEs.
 * Should only be used from a task_work including IO_URING_F_MULTISHOT.
 */
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool posted;

	lockdep_assert(!io_wq_current_is_worker());
	lockdep_assert_held(&ctx->uring_lock);

	__io_cq_lock(ctx);
	posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
	ctx->submit_state.cq_flush = true;
	__io_cq_unlock_post(ctx);
	return posted;
}

static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * All execution paths but io-wq use the deferred completions by
	 * passing IO_URING_F_COMPLETE_DEFER and thus should not end up here.
	 */
	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
		return;

	/*
	 * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
	 * the submitter task context, IOPOLL protects with uring_lock.
	 */
	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
		return;
	}

	io_cq_lock(ctx);
	if (!(req->flags & REQ_F_CQE_SKIP)) {
		if (!io_fill_cqe_req(ctx, req))
			io_req_cqe_overflow(req);
	}
	io_cq_unlock_post(ctx);

	/*
	 * We don't free the request here because we know it's called from
	 * io-wq only, which holds a reference, so it cannot be the last put.
	 */
	req_ref_put(req);
}

void io_req_defer_failed(struct io_kiocb *req, s32 res)
	__must_hold(&ctx->uring_lock)
{
	const struct io_cold_def *def = &io_cold_defs[req->opcode];

	lockdep_assert_held(&req->ctx->uring_lock);

	req_set_fail(req);
	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
	if (def->fail)
		def->fail(req);
	io_req_complete_defer(req);
}

/*
 * Don't initialise the fields below on every allocation, but do that in
 * advance and keep them valid across allocations.
 */
static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	req->ctx = ctx;
	req->link = NULL;
	req->async_data = NULL;
	/* not necessary, but safer to zero */
	memset(&req->cqe, 0, sizeof(req->cqe));
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}

/*
 * A request might get retired back into the request caches even before opcode
 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
 * Because of that, io_alloc_req() should be called only under ->uring_lock
 * and with extra caution to not get a request that is still worked on.
 */
__cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	void *reqs[IO_REQ_ALLOC_BATCH];
	int ret;

	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);

	/*
	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
	 * retry single alloc to be on the safe side.
	 */
	if (unlikely(ret <= 0)) {
		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
		if (!reqs[0])
			return false;
		ret = 1;
	}

	percpu_ref_get_many(&ctx->refs, ret);
	while (ret--) {
		struct io_kiocb *req = reqs[ret];

		io_preinit_req(req, ctx);
		io_req_add_to_cache(req, ctx);
	}
	return true;
}

__cold void io_free_req(struct io_kiocb *req)
{
	/* refs were already put, restore them for io_req_task_complete() */
	req->flags &= ~REQ_F_REFCOUNT;
	/* we only want to free it, don't post CQEs */
	req->flags |= REQ_F_CQE_SKIP;
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

static void __io_req_find_next_prep(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock(&ctx->completion_lock);
	io_disarm_next(req);
	spin_unlock(&ctx->completion_lock);
}

static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (unlikely(req->flags & IO_DISARM_MASK))
		__io_req_find_next_prep(req);
	nxt = req->link;
	req->link = NULL;
	return nxt;
}

static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ctx)
		return;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);

	io_submit_flush_completions(ctx);
	mutex_unlock(&ctx->uring_lock);
	percpu_ref_put(&ctx->refs);
}

/*
 * Run queued task_work, returning the number of entries processed in *count.
 * If more entries than max_entries are available, stop processing once this
 * is reached and return the rest of the list.
 */
struct llist_node *io_handle_tw_list(struct llist_node *node,
				     unsigned int *count,
				     unsigned int max_entries)
{
	struct io_ring_ctx *ctx = NULL;
	struct io_tw_state ts = { };

	do {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		if (req->ctx != ctx) {
			ctx_flush_and_put(ctx, &ts);
			ctx = req->ctx;
			mutex_lock(&ctx->uring_lock);
			percpu_ref_get(&ctx->refs);
		}
		INDIRECT_CALL_2(req->io_task_work.func,
				io_poll_task_func, io_req_rw_complete,
				req, &ts);
		node = next;
		(*count)++;
		if (unlikely(need_resched())) {
			ctx_flush_and_put(ctx, &ts);
			ctx = NULL;
			cond_resched();
		}
	} while (node && *count < max_entries);

	ctx_flush_and_put(ctx, &ts);
	return node;
}

/**
 * io_llist_xchg - swap all entries in a lock-less list
 * @head:	the head of lock-less list to delete all entries
 * @new:	new entry as the head of the list
 *
 * If list is empty, return NULL, otherwise, return the pointer to the first entry.
 * The order of entries returned is from the newest to the oldest added one.
 */
static inline struct llist_node *io_llist_xchg(struct llist_head *head,
					       struct llist_node *new)
{
	return xchg(&head->first, new);
}

static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
	struct llist_node *node = llist_del_all(&tctx->task_list);
	struct io_ring_ctx *last_ctx = NULL;
	struct io_kiocb *req;

	while (node) {
		req = container_of(node, struct io_kiocb, io_task_work.node);
		node = node->next;
		if (sync && last_ctx != req->ctx) {
			if (last_ctx) {
				flush_delayed_work(&last_ctx->fallback_work);
				percpu_ref_put(&last_ctx->refs);
			}
			last_ctx = req->ctx;
			percpu_ref_get(&last_ctx->refs);
		}
		if (llist_add(&req->io_task_work.node,
			      &req->ctx->fallback_llist))
			schedule_delayed_work(&req->ctx->fallback_work, 1);
	}

	if (last_ctx) {
		flush_delayed_work(&last_ctx->fallback_work);
		percpu_ref_put(&last_ctx->refs);
	}
}

struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
				      unsigned int max_entries,
				      unsigned int *count)
{
	struct llist_node *node;

	if (unlikely(current->flags & PF_EXITING)) {
		io_fallback_tw(tctx, true);
		return NULL;
	}

	node = llist_del_all(&tctx->task_list);
	if (node) {
		node = llist_reverse_order(node);
		node = io_handle_tw_list(node, count, max_entries);
	}

	/* relaxed read is enough as only the task itself sets ->in_cancel */
	if (unlikely(atomic_read(&tctx->in_cancel)))
		io_uring_drop_tctx_refs(current);

	trace_io_uring_task_work_run(tctx, *count);
	return node;
}

void tctx_task_work(struct callback_head *cb)
{
	struct io_uring_task *tctx;
	struct llist_node *ret;
	unsigned int count = 0;

	tctx = container_of(cb, struct io_uring_task, task_work);
	ret = tctx_task_work_run(tctx, UINT_MAX, &count);
	/* can't happen */
	WARN_ON_ONCE(ret);
}

static inline void io_req_local_work_add(struct io_kiocb *req,
					 struct io_ring_ctx *ctx,
					 unsigned flags)
{
	unsigned nr_wait, nr_tw, nr_tw_prev;
	struct llist_node *head;

	/* See comment above IO_CQ_WAKE_INIT */
	BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);

	/*
	 * We don't know how many requests are there in the link and whether
	 * they can even be queued lazily, fall back to non-lazy.
	 */
	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
		flags &= ~IOU_F_TWQ_LAZY_WAKE;

	guard(rcu)();

	head = READ_ONCE(ctx->work_llist.first);
	do {
		nr_tw_prev = 0;
		if (head) {
			struct io_kiocb *first_req = container_of(head,
							struct io_kiocb,
							io_task_work.node);
			/*
			 * Might be executed at any moment, rely on
			 * SLAB_TYPESAFE_BY_RCU to keep it alive.
			 */
			nr_tw_prev = READ_ONCE(first_req->nr_tw);
		}

		/*
		 * Theoretically, it can overflow, but that's fine as one of
		 * previous adds should've tried to wake the task.
		 */
		nr_tw = nr_tw_prev + 1;
		if (!(flags & IOU_F_TWQ_LAZY_WAKE))
			nr_tw = IO_CQ_WAKE_FORCE;

		req->nr_tw = nr_tw;
		req->io_task_work.node.next = head;
	} while (!try_cmpxchg(&ctx->work_llist.first, &head,
			      &req->io_task_work.node));

	/*
	 * cmpxchg implies a full barrier, which pairs with the barrier
	 * in set_current_state() on the io_cqring_wait() side. It's used
	 * to ensure that either we see updated ->cq_wait_nr, or waiters
	 * going to sleep will observe the work added to the list, which
	 * is similar to the wait/wake task state sync.
	 */

	if (!head) {
		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
			atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
		if (ctx->has_evfd)
			io_eventfd_signal(ctx);
	}

	nr_wait = atomic_read(&ctx->cq_wait_nr);
	/* not enough or no one is waiting */
	if (nr_tw < nr_wait)
		return;
	/* the previous add has already woken it up */
	if (nr_tw_prev >= nr_wait)
		return;
	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
}

static void io_req_normal_work_add(struct io_kiocb *req)
{
	struct io_uring_task *tctx = req->task->io_uring;
	struct io_ring_ctx *ctx = req->ctx;

	/* task_work already pending, we're done */
	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
		return;

	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);

	/* SQPOLL doesn't need the task_work added, it'll run it itself */
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct io_sq_data *sqd = ctx->sq_data;

		if (sqd->thread)
			__set_notify_signal(sqd->thread);
		return;
	}

	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
		return;

	io_fallback_tw(tctx, false);
}

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		io_req_local_work_add(req, req->ctx, flags);
	else
		io_req_normal_work_add(req);
}

void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
				 unsigned flags)
{
	if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
		return;
	io_req_local_work_add(req, ctx, flags);
}

static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
	struct llist_node *node;

	node = llist_del_all(&ctx->work_llist);
	while (node) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		node = node->next;
		io_req_normal_work_add(req);
	}
}

static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
				       int min_events)
{
	if (llist_empty(&ctx->work_llist))
		return false;
	if (events < min_events)
		return true;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
	return false;
}

static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
			       int min_events)
{
	struct llist_node *node;
	unsigned int loops = 0;
	int ret = 0;

	if (WARN_ON_ONCE(ctx->submitter_task != current))
		return -EEXIST;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
again:
	/*
	 * llists are in reverse order, flip it back the right way before
	 * running the pending items.
	 */
	node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL));
	while (node) {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);
		INDIRECT_CALL_2(req->io_task_work.func,
				io_poll_task_func, io_req_rw_complete,
				req, ts);
		ret++;
		node = next;
	}
	loops++;

	if (io_run_local_work_continue(ctx, ret, min_events))
		goto again;
	io_submit_flush_completions(ctx);
	if (io_run_local_work_continue(ctx, ret, min_events))
		goto again;

	trace_io_uring_local_work_run(ctx, ret, loops);
	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
					   int min_events)
{
	struct io_tw_state ts = {};

	if (llist_empty(&ctx->work_llist))
		return 0;
	return __io_run_local_work(ctx, &ts, min_events);
}

static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
{
	struct io_tw_state ts = {};
	int ret;

	mutex_lock(&ctx->uring_lock);
	ret = __io_run_local_work(ctx, &ts, min_events);
	mutex_unlock(&ctx->uring_lock);
	return ret;
}

static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	io_req_defer_failed(req, req->cqe.res);
}

void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		io_req_defer_failed(req, -EFAULT);
	else if (req->flags & REQ_F_FORCE_ASYNC)
		io_queue_iowq(req);
	else
		io_queue_sqe(req);
}

void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
	io_req_set_res(req, ret, 0);
	req->io_task_work.func = io_req_task_cancel;
	io_req_task_work_add(req);
}

void io_req_task_queue(struct io_kiocb *req)
{
	req->io_task_work.func = io_req_task_submit;
	io_req_task_work_add(req);
}

void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

static void io_free_batch_list(struct io_ring_ctx *ctx,
			       struct io_wq_work_node *node)
	__must_hold(&ctx->uring_lock)
{
	do {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
			if (req->flags & REQ_F_REFCOUNT) {
				node = req->comp_list.next;
				if (!req_ref_put_and_test(req))
					continue;
			}
			if ((req->flags & REQ_F_POLLED) && req->apoll) {
				struct async_poll *apoll = req->apoll;

				if (apoll->double_poll)
					kfree(apoll->double_poll);
				if (!io_alloc_cache_put(&ctx->apoll_cache, apoll))
					kfree(apoll);
				req->flags &= ~REQ_F_POLLED;
			}
			if (req->flags & IO_REQ_LINK_FLAGS)
				io_queue_next(req);
			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
				io_clean_op(req);
		}
		io_put_file(req);
		io_put_rsrc_node(ctx, req->rsrc_node);
		io_put_task(req->task);

		node = req->comp_list.next;
		io_req_add_to_cache(req, ctx);
	} while (node);
}

void __io_submit_flush_completions(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state = &ctx->submit_state;
	struct io_wq_work_node *node;

	__io_cq_lock(ctx);
	__wq_list_for_each(node, &state->compl_reqs) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		if (!(req->flags & REQ_F_CQE_SKIP) &&
		    unlikely(!io_fill_cqe_req(ctx, req))) {
			if (ctx->lockless_cq) {
				spin_lock(&ctx->completion_lock);
				io_req_cqe_overflow(req);
				spin_unlock(&ctx->completion_lock);
			} else {
				io_req_cqe_overflow(req);
			}
		}
	}
	__io_cq_unlock_post(ctx);

	if (!wq_list_empty(&state->compl_reqs)) {
		io_free_batch_list(ctx, state->compl_reqs.first);
		INIT_WQ_LIST(&state->compl_reqs);
	}
	ctx->submit_state.cq_flush = false;
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* See comment at the top of this file */
	smp_rmb();
	return __io_cqring_events(ctx);
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!wq_list_empty(&ctx->iopoll_list)) {
		/* let it sleep and repeat later if can't complete a request */
		if (io_do_iopoll(ctx, true) == 0)
			break;
		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 * Also let task_work, etc. progress by releasing the mutex.
		 */
		if (need_resched()) {
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
		}
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	unsigned long check_cq;

	lockdep_assert_held(&ctx->uring_lock);

	if (!io_allowed_run_tw(ctx))
		return -EEXIST;

	check_cq = READ_ONCE(ctx->check_cq);
	if (unlikely(check_cq)) {
		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
			__io_cqring_overflow_flush(ctx, false);
		/*
		 * Similarly do not spin if we have not informed the user of any
		 * dropped CQE.
		 */
		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
			return -EBADR;
	}
	/*
	 * Don't enter poll loop if we already have events pending.
	 * If we do, we can potentially be spinning for commands that
	 * already triggered a CQE (eg in error).
	 */
	if (io_cqring_events(ctx))
		return 0;

	do {
		int ret = 0;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (wq_list_empty(&ctx->iopoll_list) ||
		    io_task_work_pending(ctx)) {
			u32 tail = ctx->cached_cq_tail;

			(void) io_run_local_work_locked(ctx, min);

			if (task_work_pending(current) ||
			    wq_list_empty(&ctx->iopoll_list)) {
				mutex_unlock(&ctx->uring_lock);
				io_run_task_work();
				mutex_lock(&ctx->uring_lock);
			}
			/* some requests don't go through iopoll_list */
			if (tail != ctx->cached_cq_tail ||
			    wq_list_empty(&ctx->iopoll_list))
				break;
		}
		ret = io_do_iopoll(ctx, !min);
		if (unlikely(ret < 0))
			return ret;

		if (task_sigpending(current))
			return -EINTR;
		if (need_resched())
			break;

		nr_events += ret;
	} while (nr_events < min);

	return 0;
}

void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_req_complete_defer(req);
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from a io_do_iopoll() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

	/* workqueue context doesn't hold uring_lock, grab it now */
	if (unlikely(needs_lock))
		mutex_lock(&ctx->uring_lock);

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (wq_list_empty(&ctx->iopoll_list)) {
		ctx->poll_multi_queue = false;
	} else if (!ctx->poll_multi_queue) {
		struct io_kiocb *list_req;

		list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
					comp_list);
		if (list_req->file != req->file)
			ctx->poll_multi_queue = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (READ_ONCE(req->iopoll_completed))
		wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
	else
		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);

	if (unlikely(needs_lock)) {
		/*
		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
		 * in sq thread task context or in io worker task context. If
		 * current task context is sq thread, we don't need to check
		 * whether we should wake up the sq thread.
		 */
		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
		    wq_has_sleeper(&ctx->sq_data->wait))
			wake_up(&ctx->sq_data->wait);

		mutex_unlock(&ctx->uring_lock);
	}
}

io_req_flags_t io_file_get_flags(struct file *file)
{
	io_req_flags_t res = 0;

	if (S_ISREG(file_inode(file)->i_mode))
		res |= REQ_F_ISREG;
	if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
		res |= REQ_F_SUPPORT_NOWAIT;
	return res;
}

bool io_alloc_async_data(struct io_kiocb *req)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];

	WARN_ON_ONCE(!def->async_size);
	req->async_data = kmalloc(def->async_size, GFP_KERNEL);
	if (req->async_data) {
		req->flags |= REQ_F_ASYNC_DATA;
		return false;
	}
	return true;
}

static u32 io_get_sequence(struct io_kiocb *req)
{
	u32 seq = req->ctx->cached_sq_head;
	struct io_kiocb *cur;

	/* need original cached_sq_head, but it was increased for each req */
	io_for_each_link(cur, req)
		seq--;
	return seq;
}

static __cold void io_drain_req(struct io_kiocb *req)
	__must_hold(&ctx->uring_lock)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_defer_entry *de;
	int ret;
	u32 seq = io_get_sequence(req);

	/* Still need defer if there is pending req in defer list. */
	spin_lock(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
		spin_unlock(&ctx->completion_lock);
queue:
		ctx->drain_active = false;
		io_req_task_queue(req);
		return;
	}
	spin_unlock(&ctx->completion_lock);

	io_prep_async_link(req);
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de) {
		ret = -ENOMEM;
		io_req_defer_failed(req, ret);
		return;
	}

	spin_lock(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
		spin_unlock(&ctx->completion_lock);
		kfree(de);
		goto queue;
	}

	trace_io_uring_defer(req);
	de->req = req;
	de->seq = seq;
	list_add_tail(&de->list, &ctx->defer_list);
	spin_unlock(&ctx->completion_lock);
}

static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
			   unsigned int issue_flags)
{
	if (req->file || !def->needs_file)
		return true;

	if (req->flags & REQ_F_FIXED_FILE)
		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
	else
		req->file = io_file_get_normal(req, req->cqe.fd);

	return !!req->file;
}

static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	const struct cred *creds = NULL;
	int ret;

	if (unlikely(!io_assign_file(req, def, issue_flags)))
		return -EBADF;

	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
		creds = override_creds(req->creds);

	if (!def->audit_skip)
		audit_uring_entry(req->opcode);

	ret = def->issue(req, issue_flags);

	if (!def->audit_skip)
		audit_uring_exit(!ret, ret);

	if (creds)
		revert_creds(creds);

	if (ret == IOU_OK) {
		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
			io_req_complete_defer(req);
		else
			io_req_complete_post(req, issue_flags);

		return 0;
	}

	if (ret == IOU_ISSUE_SKIP_COMPLETE) {
		ret = 0;
		io_arm_ltimeout(req);

		/* If the op doesn't have a file, we're not polling for it */
		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
			io_iopoll_req_issued(req, issue_flags);
	}
	return ret;
}

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
				 IO_URING_F_COMPLETE_DEFER);
}

struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	if (req_ref_put_and_test(req)) {
		if (req->flags & IO_REQ_LINK_FLAGS)
			nxt = io_req_find_next(req);
		io_free_req(req);
	}
	return nxt ? &nxt->work : NULL;
}

void io_wq_submit_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
	bool needs_poll = false;
	int ret = 0, err = -ECANCELED;

	/* one will be dropped by ->io_wq_free_work() after returning to io-wq */
	if (!(req->flags & REQ_F_REFCOUNT))
		__io_req_set_refcount(req, 2);
	else
		req_ref_get(req);

	io_arm_ltimeout(req);

	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
	if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
fail:
		io_req_task_queue_fail(req, err);
		return;
	}
	if (!io_assign_file(req, def, issue_flags)) {
		err = -EBADF;
		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
		goto fail;
	}

	/*
	 * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
	 * submitter task context. Final request completions are handed to the
	 * right context, however this is not the case of auxiliary CQEs,
	 * which is the main means of operation for multishot requests.
	 * Don't allow any multishot execution from io-wq. It's more restrictive
	 * than necessary and also cleaner.
	 */
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		err = -EBADFD;
		if (!io_file_can_poll(req))
			goto fail;
		if (req->file->f_flags & O_NONBLOCK ||
		    req->file->f_mode & FMODE_NOWAIT) {
			err = -ECANCELED;
			if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
				goto fail;
			return;
		} else {
			req->flags &= ~REQ_F_APOLL_MULTISHOT;
		}
	}

	if (req->flags & REQ_F_FORCE_ASYNC) {
		bool opcode_poll = def->pollin || def->pollout;

		if (opcode_poll && io_file_can_poll(req)) {
			needs_poll = true;
			issue_flags |= IO_URING_F_NONBLOCK;
		}
	}

	do {
		ret = io_issue_sqe(req, issue_flags);
		if (ret != -EAGAIN)
			break;

		/*
		 * If REQ_F_NOWAIT is set, then don't wait or retry with
		 * poll. -EAGAIN is final for that case.
		 */
		if (req->flags & REQ_F_NOWAIT)
			break;

		/*
		 * We can get EAGAIN for iopolled IO even though we're
		 * forcing a sync submission from here, since we can't
		 * wait for request slots on the block side.
		 */
		if (!needs_poll) {
			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
				break;
			if (io_wq_worker_stopped())
				break;
			cond_resched();
			continue;
		}

		if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
			return;
		/* aborted or ready, in either case retry blocking */
		needs_poll = false;
		issue_flags &= ~IO_URING_F_NONBLOCK;
	} while (1);

	/* avoid locking problems by failing it from a clean context */
	if (ret)
		io_req_task_queue_fail(req, ret);
}

inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
				      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_fixed_file *slot;
	struct file *file = NULL;

	io_ring_submit_lock(ctx, issue_flags);

	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
		goto out;
	fd = array_index_nospec(fd, ctx->nr_user_files);
	slot = io_fixed_file_slot(&ctx->file_table, fd);
	if (!req->rsrc_node)
		__io_req_set_rsrc_node(req, ctx);
	req->flags |= io_slot_flags(slot);
	file = io_slot_file(slot);
out:
	io_ring_submit_unlock(ctx, issue_flags);
	return file;
}

struct file *io_file_get_normal(struct io_kiocb *req, int fd)
{
	struct file *file = fget(fd);

	trace_io_uring_file_get(req, fd);

	/* we don't allow fixed io_uring files */
	if (file && io_is_uring_fops(file))
		io_req_track_inflight(req);
	return file;
}

static void io_queue_async(struct io_kiocb *req, int ret)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_kiocb *linked_timeout;

	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
		io_req_defer_failed(req, ret);
		return;
	}

	linked_timeout = io_prep_linked_timeout(req);

	switch (io_arm_poll_handler(req, 0)) {
	case IO_APOLL_READY:
		io_kbuf_recycle(req, 0);
		io_req_task_queue(req);
		break;
	case IO_APOLL_ABORTED:
		io_kbuf_recycle(req, 0);
		io_queue_iowq(req);
		break;
	case IO_APOLL_OK:
		break;
	}

	if (linked_timeout)
		io_queue_linked_timeout(linked_timeout);
}

static inline void io_queue_sqe(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	int ret;

	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);

	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * doesn't support non-blocking read/write attempts
	 */
	if (unlikely(ret))
		io_queue_async(req, ret);
}

static void io_queue_sqe_fallback(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	if (unlikely(req->flags & REQ_F_FAIL)) {
		/*
		 * We don't submit, fail them all, for that replace hardlinks
		 * with normal links. Extra REQ_F_LINK is tolerated.
		 */
		req->flags &= ~REQ_F_HARDLINK;
		req->flags |= REQ_F_LINK;
		io_req_defer_failed(req, req->cqe.res);
	} else {
		if (unlikely(req->ctx->drain_active))
			io_drain_req(req);
		else
			io_queue_iowq(req);
	}
}

/*
 * Check SQE restrictions (opcode and flags).
 *
 * Returns 'true' if SQE is allowed, 'false' otherwise.
1957 */ 1958 static inline bool io_check_restriction(struct io_ring_ctx *ctx, 1959 struct io_kiocb *req, 1960 unsigned int sqe_flags) 1961 { 1962 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) 1963 return false; 1964 1965 if ((sqe_flags & ctx->restrictions.sqe_flags_required) != 1966 ctx->restrictions.sqe_flags_required) 1967 return false; 1968 1969 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed | 1970 ctx->restrictions.sqe_flags_required)) 1971 return false; 1972 1973 return true; 1974 } 1975 1976 static void io_init_req_drain(struct io_kiocb *req) 1977 { 1978 struct io_ring_ctx *ctx = req->ctx; 1979 struct io_kiocb *head = ctx->submit_state.link.head; 1980 1981 ctx->drain_active = true; 1982 if (head) { 1983 /* 1984 * If we need to drain a request in the middle of a link, drain 1985 * the head request and the next request/link after the current 1986 * link. Considering sequential execution of links, 1987 * REQ_F_IO_DRAIN will be maintained for every request of our 1988 * link. 1989 */ 1990 head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; 1991 ctx->drain_next = true; 1992 } 1993 } 1994 1995 static __cold int io_init_fail_req(struct io_kiocb *req, int err) 1996 { 1997 /* ensure per-opcode data is cleared if we fail before prep */ 1998 memset(&req->cmd.data, 0, sizeof(req->cmd.data)); 1999 return err; 2000 } 2001 2002 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, 2003 const struct io_uring_sqe *sqe) 2004 __must_hold(&ctx->uring_lock) 2005 { 2006 const struct io_issue_def *def; 2007 unsigned int sqe_flags; 2008 int personality; 2009 u8 opcode; 2010 2011 /* req is partially pre-initialised, see io_preinit_req() */ 2012 req->opcode = opcode = READ_ONCE(sqe->opcode); 2013 /* same numerical values with corresponding REQ_F_*, safe to copy */ 2014 sqe_flags = READ_ONCE(sqe->flags); 2015 req->flags = (io_req_flags_t) sqe_flags; 2016 req->cqe.user_data = READ_ONCE(sqe->user_data); 2017 req->file = NULL; 2018 req->rsrc_node = NULL; 2019 req->task = current; 2020 req->cancel_seq_set = false; 2021 2022 if (unlikely(opcode >= IORING_OP_LAST)) { 2023 req->opcode = 0; 2024 return io_init_fail_req(req, -EINVAL); 2025 } 2026 def = &io_issue_defs[opcode]; 2027 if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { 2028 /* enforce forwards compatibility on users */ 2029 if (sqe_flags & ~SQE_VALID_FLAGS) 2030 return io_init_fail_req(req, -EINVAL); 2031 if (sqe_flags & IOSQE_BUFFER_SELECT) { 2032 if (!def->buffer_select) 2033 return io_init_fail_req(req, -EOPNOTSUPP); 2034 req->buf_index = READ_ONCE(sqe->buf_group); 2035 } 2036 if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS) 2037 ctx->drain_disabled = true; 2038 if (sqe_flags & IOSQE_IO_DRAIN) { 2039 if (ctx->drain_disabled) 2040 return io_init_fail_req(req, -EOPNOTSUPP); 2041 io_init_req_drain(req); 2042 } 2043 } 2044 if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { 2045 if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) 2046 return io_init_fail_req(req, -EACCES); 2047 /* knock it to the slow queue path, will be drained there */ 2048 if (ctx->drain_active) 2049 req->flags |= REQ_F_FORCE_ASYNC; 2050 /* if there is no link, we're at "next" request and need to drain */ 2051 if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { 2052 ctx->drain_next = false; 2053 ctx->drain_active = true; 2054 req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; 2055 } 2056 } 2057 2058 if (!def->ioprio && sqe->ioprio) 2059 return io_init_fail_req(req, -EINVAL); 2060 if (!def->iopoll && (ctx->flags & 
IORING_SETUP_IOPOLL)) 2061 return io_init_fail_req(req, -EINVAL); 2062 2063 if (def->needs_file) { 2064 struct io_submit_state *state = &ctx->submit_state; 2065 2066 req->cqe.fd = READ_ONCE(sqe->fd); 2067 2068 /* 2069 * Plug now if we have more than 2 IO left after this, and the 2070 * target is potentially a read/write to block based storage. 2071 */ 2072 if (state->need_plug && def->plug) { 2073 state->plug_started = true; 2074 state->need_plug = false; 2075 blk_start_plug_nr_ios(&state->plug, state->submit_nr); 2076 } 2077 } 2078 2079 personality = READ_ONCE(sqe->personality); 2080 if (personality) { 2081 int ret; 2082 2083 req->creds = xa_load(&ctx->personalities, personality); 2084 if (!req->creds) 2085 return io_init_fail_req(req, -EINVAL); 2086 get_cred(req->creds); 2087 ret = security_uring_override_creds(req->creds); 2088 if (ret) { 2089 put_cred(req->creds); 2090 return io_init_fail_req(req, ret); 2091 } 2092 req->flags |= REQ_F_CREDS; 2093 } 2094 2095 return def->prep(req, sqe); 2096 } 2097 2098 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe, 2099 struct io_kiocb *req, int ret) 2100 { 2101 struct io_ring_ctx *ctx = req->ctx; 2102 struct io_submit_link *link = &ctx->submit_state.link; 2103 struct io_kiocb *head = link->head; 2104 2105 trace_io_uring_req_failed(sqe, req, ret); 2106 2107 /* 2108 * Avoid breaking links in the middle as it renders links with SQPOLL 2109 * unusable. Instead of failing eagerly, continue assembling the link if 2110 * applicable and mark the head with REQ_F_FAIL. The link flushing code 2111 * should find the flag and handle the rest. 2112 */ 2113 req_fail_link_node(req, ret); 2114 if (head && !(head->flags & REQ_F_FAIL)) 2115 req_fail_link_node(head, -ECANCELED); 2116 2117 if (!(req->flags & IO_REQ_LINK_FLAGS)) { 2118 if (head) { 2119 link->last->link = req; 2120 link->head = NULL; 2121 req = head; 2122 } 2123 io_queue_sqe_fallback(req); 2124 return ret; 2125 } 2126 2127 if (head) 2128 link->last->link = req; 2129 else 2130 link->head = req; 2131 link->last = req; 2132 return 0; 2133 } 2134 2135 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, 2136 const struct io_uring_sqe *sqe) 2137 __must_hold(&ctx->uring_lock) 2138 { 2139 struct io_submit_link *link = &ctx->submit_state.link; 2140 int ret; 2141 2142 ret = io_init_req(ctx, req, sqe); 2143 if (unlikely(ret)) 2144 return io_submit_fail_init(sqe, req, ret); 2145 2146 trace_io_uring_submit_req(req); 2147 2148 /* 2149 * If we already have a head request, queue this one for async 2150 * submittal once the head completes. If we don't have a head but 2151 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be 2152 * submitted sync once the chain is complete. If none of those 2153 * conditions are true (normal request), then just queue it. 
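	 *
	 * As a purely illustrative example: three SQEs submitted in one batch
	 * where the first two carry IOSQE_IO_LINK and the third does not form
	 * a single chain; the second is only issued after the first completes,
	 * the third after the second, and the third also terminates the link.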
2154 */ 2155 if (unlikely(link->head)) { 2156 trace_io_uring_link(req, link->head); 2157 link->last->link = req; 2158 link->last = req; 2159 2160 if (req->flags & IO_REQ_LINK_FLAGS) 2161 return 0; 2162 /* last request of the link, flush it */ 2163 req = link->head; 2164 link->head = NULL; 2165 if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)) 2166 goto fallback; 2167 2168 } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS | 2169 REQ_F_FORCE_ASYNC | REQ_F_FAIL))) { 2170 if (req->flags & IO_REQ_LINK_FLAGS) { 2171 link->head = req; 2172 link->last = req; 2173 } else { 2174 fallback: 2175 io_queue_sqe_fallback(req); 2176 } 2177 return 0; 2178 } 2179 2180 io_queue_sqe(req); 2181 return 0; 2182 } 2183 2184 /* 2185 * Batched submission is done, ensure local IO is flushed out. 2186 */ 2187 static void io_submit_state_end(struct io_ring_ctx *ctx) 2188 { 2189 struct io_submit_state *state = &ctx->submit_state; 2190 2191 if (unlikely(state->link.head)) 2192 io_queue_sqe_fallback(state->link.head); 2193 /* flush only after queuing links as they can generate completions */ 2194 io_submit_flush_completions(ctx); 2195 if (state->plug_started) 2196 blk_finish_plug(&state->plug); 2197 } 2198 2199 /* 2200 * Start submission side cache. 2201 */ 2202 static void io_submit_state_start(struct io_submit_state *state, 2203 unsigned int max_ios) 2204 { 2205 state->plug_started = false; 2206 state->need_plug = max_ios > 2; 2207 state->submit_nr = max_ios; 2208 /* set only head, no need to init link_last in advance */ 2209 state->link.head = NULL; 2210 } 2211 2212 static void io_commit_sqring(struct io_ring_ctx *ctx) 2213 { 2214 struct io_rings *rings = ctx->rings; 2215 2216 /* 2217 * Ensure any loads from the SQEs are done at this point, 2218 * since once we write the new head, the application could 2219 * write new data to them. 2220 */ 2221 smp_store_release(&rings->sq.head, ctx->cached_sq_head); 2222 } 2223 2224 /* 2225 * Fetch an sqe, if one is available. Note this returns a pointer to memory 2226 * that is mapped by userspace. This means that care needs to be taken to 2227 * ensure that reads are stable, as we cannot rely on userspace always 2228 * being a good citizen. If members of the sqe are validated and then later 2229 * used, it's important that those reads are done through READ_ONCE() to 2230 * prevent a re-load down the line. 2231 */ 2232 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) 2233 { 2234 unsigned mask = ctx->sq_entries - 1; 2235 unsigned head = ctx->cached_sq_head++ & mask; 2236 2237 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) { 2238 head = READ_ONCE(ctx->sq_array[head]); 2239 if (unlikely(head >= ctx->sq_entries)) { 2240 /* drop invalid entries */ 2241 spin_lock(&ctx->completion_lock); 2242 ctx->cq_extra--; 2243 spin_unlock(&ctx->completion_lock); 2244 WRITE_ONCE(ctx->rings->sq_dropped, 2245 READ_ONCE(ctx->rings->sq_dropped) + 1); 2246 return false; 2247 } 2248 } 2249 2250 /* 2251 * The cached sq head (or cq tail) serves two purposes: 2252 * 2253 * 1) allows us to batch the cost of updating the user visible 2254 * head updates. 2255 * 2) allows the kernel side to track the head on its own, even 2256 * though the application is the one updating it. 
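	 *
	 * Only the head value published by io_commit_sqring() is visible to
	 * the application; illustratively, its view of free SQ space is
	 *
	 *	free = sq_entries - (sq_tail - sq_head)
	 *
	 * where sq_tail is its own private copy and sq_head is the last
	 * value the kernel stored.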
2257 */ 2258 2259 /* double index for 128-byte SQEs, twice as long */ 2260 if (ctx->flags & IORING_SETUP_SQE128) 2261 head <<= 1; 2262 *sqe = &ctx->sq_sqes[head]; 2263 return true; 2264 } 2265 2266 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) 2267 __must_hold(&ctx->uring_lock) 2268 { 2269 unsigned int entries = io_sqring_entries(ctx); 2270 unsigned int left; 2271 int ret; 2272 2273 if (unlikely(!entries)) 2274 return 0; 2275 /* make sure SQ entry isn't read before tail */ 2276 ret = left = min(nr, entries); 2277 io_get_task_refs(left); 2278 io_submit_state_start(&ctx->submit_state, left); 2279 2280 do { 2281 const struct io_uring_sqe *sqe; 2282 struct io_kiocb *req; 2283 2284 if (unlikely(!io_alloc_req(ctx, &req))) 2285 break; 2286 if (unlikely(!io_get_sqe(ctx, &sqe))) { 2287 io_req_add_to_cache(req, ctx); 2288 break; 2289 } 2290 2291 /* 2292 * Continue submitting even for sqe failure if the 2293 * ring was setup with IORING_SETUP_SUBMIT_ALL 2294 */ 2295 if (unlikely(io_submit_sqe(ctx, req, sqe)) && 2296 !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) { 2297 left--; 2298 break; 2299 } 2300 } while (--left); 2301 2302 if (unlikely(left)) { 2303 ret -= left; 2304 /* try again if it submitted nothing and can't allocate a req */ 2305 if (!ret && io_req_cache_empty(ctx)) 2306 ret = -EAGAIN; 2307 current->io_uring->cached_refs += left; 2308 } 2309 2310 io_submit_state_end(ctx); 2311 /* Commit SQ ring head once we've consumed and submitted all SQEs */ 2312 io_commit_sqring(ctx); 2313 return ret; 2314 } 2315 2316 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, 2317 int wake_flags, void *key) 2318 { 2319 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq); 2320 2321 /* 2322 * Cannot safely flush overflowed CQEs from here, ensure we wake up 2323 * the task, and the next invocation will do it. 2324 */ 2325 if (io_should_wake(iowq) || io_has_work(iowq->ctx)) 2326 return autoremove_wake_function(curr, mode, wake_flags, key); 2327 return -1; 2328 } 2329 2330 int io_run_task_work_sig(struct io_ring_ctx *ctx) 2331 { 2332 if (!llist_empty(&ctx->work_llist)) { 2333 __set_current_state(TASK_RUNNING); 2334 if (io_run_local_work(ctx, INT_MAX) > 0) 2335 return 0; 2336 } 2337 if (io_run_task_work() > 0) 2338 return 0; 2339 if (task_sigpending(current)) 2340 return -EINTR; 2341 return 0; 2342 } 2343 2344 static bool current_pending_io(void) 2345 { 2346 struct io_uring_task *tctx = current->io_uring; 2347 2348 if (!tctx) 2349 return false; 2350 return percpu_counter_read_positive(&tctx->inflight); 2351 } 2352 2353 /* when returns >0, the caller should retry */ 2354 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, 2355 struct io_wait_queue *iowq) 2356 { 2357 int ret; 2358 2359 if (unlikely(READ_ONCE(ctx->check_cq))) 2360 return 1; 2361 if (unlikely(!llist_empty(&ctx->work_llist))) 2362 return 1; 2363 if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) 2364 return 1; 2365 if (unlikely(task_sigpending(current))) 2366 return -EINTR; 2367 if (unlikely(io_should_wake(iowq))) 2368 return 0; 2369 2370 /* 2371 * Mark us as being in io_wait if we have pending requests, so cpufreq 2372 * can take into account that the task is waiting for IO - turns out 2373 * to be important for low QD IO. 
2374 */ 2375 if (current_pending_io()) 2376 current->in_iowait = 1; 2377 ret = 0; 2378 if (iowq->timeout == KTIME_MAX) 2379 schedule(); 2380 else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS)) 2381 ret = -ETIME; 2382 current->in_iowait = 0; 2383 return ret; 2384 } 2385 2386 /* 2387 * Wait until events become available, if we don't already have some. The 2388 * application must reap them itself, as they reside on the shared cq ring. 2389 */ 2390 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, 2391 const sigset_t __user *sig, size_t sigsz, 2392 struct __kernel_timespec __user *uts) 2393 { 2394 struct io_wait_queue iowq; 2395 struct io_rings *rings = ctx->rings; 2396 int ret; 2397 2398 if (!io_allowed_run_tw(ctx)) 2399 return -EEXIST; 2400 if (!llist_empty(&ctx->work_llist)) 2401 io_run_local_work(ctx, min_events); 2402 io_run_task_work(); 2403 2404 if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))) 2405 io_cqring_do_overflow_flush(ctx); 2406 if (__io_cqring_events_user(ctx) >= min_events) 2407 return 0; 2408 2409 init_waitqueue_func_entry(&iowq.wq, io_wake_function); 2410 iowq.wq.private = current; 2411 INIT_LIST_HEAD(&iowq.wq.entry); 2412 iowq.ctx = ctx; 2413 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); 2414 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; 2415 iowq.timeout = KTIME_MAX; 2416 2417 if (uts) { 2418 struct timespec64 ts; 2419 ktime_t dt; 2420 2421 if (get_timespec64(&ts, uts)) 2422 return -EFAULT; 2423 2424 dt = timespec64_to_ktime(ts); 2425 iowq.timeout = ktime_add(dt, ktime_get()); 2426 io_napi_adjust_timeout(ctx, &iowq, dt); 2427 } 2428 2429 if (sig) { 2430 #ifdef CONFIG_COMPAT 2431 if (in_compat_syscall()) 2432 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig, 2433 sigsz); 2434 else 2435 #endif 2436 ret = set_user_sigmask(sig, sigsz); 2437 2438 if (ret) 2439 return ret; 2440 } 2441 2442 io_napi_busy_loop(ctx, &iowq); 2443 2444 trace_io_uring_cqring_wait(ctx, min_events); 2445 do { 2446 int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail); 2447 unsigned long check_cq; 2448 2449 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { 2450 atomic_set(&ctx->cq_wait_nr, nr_wait); 2451 set_current_state(TASK_INTERRUPTIBLE); 2452 } else { 2453 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, 2454 TASK_INTERRUPTIBLE); 2455 } 2456 2457 ret = io_cqring_wait_schedule(ctx, &iowq); 2458 __set_current_state(TASK_RUNNING); 2459 atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT); 2460 2461 /* 2462 * Run task_work after scheduling and before io_should_wake(). 2463 * If we got woken because of task_work being processed, run it 2464 * now rather than let the caller do another wait loop. 2465 */ 2466 io_run_task_work(); 2467 if (!llist_empty(&ctx->work_llist)) 2468 io_run_local_work(ctx, nr_wait); 2469 2470 /* 2471 * Non-local task_work will be run on exit to userspace, but 2472 * if we're using DEFER_TASKRUN, then we could have waited 2473 * with a timeout for a number of requests. If the timeout 2474 * hits, we could have some requests ready to process. Ensure 2475 * this break is _after_ we have run task_work, to avoid 2476 * deferring running potentially pending requests until the 2477 * next time we wait for events. 
2478 */ 2479 if (ret < 0) 2480 break; 2481 2482 check_cq = READ_ONCE(ctx->check_cq); 2483 if (unlikely(check_cq)) { 2484 /* let the caller flush overflows, retry */ 2485 if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) 2486 io_cqring_do_overflow_flush(ctx); 2487 if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) { 2488 ret = -EBADR; 2489 break; 2490 } 2491 } 2492 2493 if (io_should_wake(&iowq)) { 2494 ret = 0; 2495 break; 2496 } 2497 cond_resched(); 2498 } while (1); 2499 2500 if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) 2501 finish_wait(&ctx->cq_wait, &iowq.wq); 2502 restore_saved_sigmask_unless(ret == -EINTR); 2503 2504 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; 2505 } 2506 2507 static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr, 2508 size_t size) 2509 { 2510 return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr, 2511 size); 2512 } 2513 2514 static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr, 2515 size_t size) 2516 { 2517 return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr, 2518 size); 2519 } 2520 2521 static void io_rings_free(struct io_ring_ctx *ctx) 2522 { 2523 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) { 2524 io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages, 2525 true); 2526 io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages, 2527 true); 2528 } else { 2529 io_pages_free(&ctx->ring_pages, ctx->n_ring_pages); 2530 ctx->n_ring_pages = 0; 2531 io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages); 2532 ctx->n_sqe_pages = 0; 2533 vunmap(ctx->rings); 2534 vunmap(ctx->sq_sqes); 2535 } 2536 2537 ctx->rings = NULL; 2538 ctx->sq_sqes = NULL; 2539 } 2540 2541 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, 2542 unsigned int cq_entries, size_t *sq_offset) 2543 { 2544 struct io_rings *rings; 2545 size_t off, sq_array_size; 2546 2547 off = struct_size(rings, cqes, cq_entries); 2548 if (off == SIZE_MAX) 2549 return SIZE_MAX; 2550 if (ctx->flags & IORING_SETUP_CQE32) { 2551 if (check_shl_overflow(off, 1, &off)) 2552 return SIZE_MAX; 2553 } 2554 2555 #ifdef CONFIG_SMP 2556 off = ALIGN(off, SMP_CACHE_BYTES); 2557 if (off == 0) 2558 return SIZE_MAX; 2559 #endif 2560 2561 if (ctx->flags & IORING_SETUP_NO_SQARRAY) { 2562 *sq_offset = SIZE_MAX; 2563 return off; 2564 } 2565 2566 *sq_offset = off; 2567 2568 sq_array_size = array_size(sizeof(u32), sq_entries); 2569 if (sq_array_size == SIZE_MAX) 2570 return SIZE_MAX; 2571 2572 if (check_add_overflow(off, sq_array_size, &off)) 2573 return SIZE_MAX; 2574 2575 return off; 2576 } 2577 2578 static void io_req_caches_free(struct io_ring_ctx *ctx) 2579 { 2580 struct io_kiocb *req; 2581 int nr = 0; 2582 2583 mutex_lock(&ctx->uring_lock); 2584 2585 while (!io_req_cache_empty(ctx)) { 2586 req = io_extract_req(ctx); 2587 kmem_cache_free(req_cachep, req); 2588 nr++; 2589 } 2590 if (nr) 2591 percpu_ref_put_many(&ctx->refs, nr); 2592 mutex_unlock(&ctx->uring_lock); 2593 } 2594 2595 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) 2596 { 2597 io_sq_thread_finish(ctx); 2598 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ 2599 if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list))) 2600 return; 2601 2602 mutex_lock(&ctx->uring_lock); 2603 if (ctx->buf_data) 2604 __io_sqe_buffers_unregister(ctx); 2605 if (ctx->file_data) 2606 __io_sqe_files_unregister(ctx); 2607 io_cqring_overflow_kill(ctx); 2608 io_eventfd_unregister(ctx); 2609 io_alloc_cache_free(&ctx->apoll_cache, kfree); 2610 
	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
	io_alloc_cache_free(&ctx->uring_cache, kfree);
	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
	io_futex_cache_free(ctx);
	io_destroy_buffers(ctx);
	mutex_unlock(&ctx->uring_lock);
	if (ctx->sq_creds)
		put_cred(ctx->sq_creds);
	if (ctx->submitter_task)
		put_task_struct(ctx->submitter_task);

	/* there are no registered resources left, nobody uses it */
	if (ctx->rsrc_node)
		io_rsrc_node_destroy(ctx, ctx->rsrc_node);

	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));

	io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
	if (ctx->mm_account) {
		mmdrop(ctx->mm_account);
		ctx->mm_account = NULL;
	}
	io_rings_free(ctx);

	percpu_ref_exit(&ctx->refs);
	free_uid(ctx->user);
	io_req_caches_free(ctx);
	if (ctx->hash_map)
		io_wq_put_hash(ctx->hash_map);
	io_napi_free(ctx);
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->cancel_table_locked.hbs);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
}

static __cold void io_activate_pollwq_cb(struct callback_head *cb)
{
	struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
					       poll_wq_task_work);

	mutex_lock(&ctx->uring_lock);
	ctx->poll_activated = true;
	mutex_unlock(&ctx->uring_lock);

	/*
	 * Wake ups for some events between start of polling and activation
	 * might've been lost due to loose synchronisation.
	 */
	wake_up_all(&ctx->poll_wq);
	percpu_ref_put(&ctx->refs);
}

__cold void io_activate_pollwq(struct io_ring_ctx *ctx)
{
	spin_lock(&ctx->completion_lock);
	/* already activated or in progress */
	if (ctx->poll_activated || ctx->poll_wq_task_work.func)
		goto out;
	if (WARN_ON_ONCE(!ctx->task_complete))
		goto out;
	if (!ctx->submitter_task)
		goto out;
	/*
	 * with ->submitter_task only the submitter task completes requests, we
	 * only need to sync with it, which is done by injecting a tw
	 */
	init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
	percpu_ref_get(&ctx->refs);
	if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
		percpu_ref_put(&ctx->refs);
out:
	spin_unlock(&ctx->completion_lock);
}

static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	if (unlikely(!ctx->poll_activated))
		io_activate_pollwq(ctx);

	poll_wait(file, &ctx->poll_wq, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (!io_sqring_full(ctx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	/*
	 * Don't flush cqring overflow list here, just do a simple check.
	 * Otherwise there could possibly be an ABBA deadlock:
	 *      CPU0                    CPU1
	 *      ----                    ----
	 *      lock(&ctx->uring_lock);
	 *                              lock(&ep->mtx);
	 *                              lock(&ctx->uring_lock);
	 *      lock(&ep->mtx);
	 *
	 * Users may get EPOLLIN while seeing nothing in the cqring; this
	 * pushes them to do the flush.
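	 *
	 * Purely as an illustrative sketch of the userspace side (not part
	 * of this file): an epoll user should treat EPOLLIN as a hint and
	 * reap via a regular enter, e.g.
	 *
	 *	epoll_wait(epfd, &ev, 1, -1);
	 *	io_uring_enter(ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL, 0);
	 *
	 * which flushes overflowed CQEs before checking for completions.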
2716 */ 2717 2718 if (__io_cqring_events_user(ctx) || io_has_work(ctx)) 2719 mask |= EPOLLIN | EPOLLRDNORM; 2720 2721 return mask; 2722 } 2723 2724 struct io_tctx_exit { 2725 struct callback_head task_work; 2726 struct completion completion; 2727 struct io_ring_ctx *ctx; 2728 }; 2729 2730 static __cold void io_tctx_exit_cb(struct callback_head *cb) 2731 { 2732 struct io_uring_task *tctx = current->io_uring; 2733 struct io_tctx_exit *work; 2734 2735 work = container_of(cb, struct io_tctx_exit, task_work); 2736 /* 2737 * When @in_cancel, we're in cancellation and it's racy to remove the 2738 * node. It'll be removed by the end of cancellation, just ignore it. 2739 * tctx can be NULL if the queueing of this task_work raced with 2740 * work cancelation off the exec path. 2741 */ 2742 if (tctx && !atomic_read(&tctx->in_cancel)) 2743 io_uring_del_tctx_node((unsigned long)work->ctx); 2744 complete(&work->completion); 2745 } 2746 2747 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) 2748 { 2749 struct io_kiocb *req = container_of(work, struct io_kiocb, work); 2750 2751 return req->ctx == data; 2752 } 2753 2754 static __cold void io_ring_exit_work(struct work_struct *work) 2755 { 2756 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); 2757 unsigned long timeout = jiffies + HZ * 60 * 5; 2758 unsigned long interval = HZ / 20; 2759 struct io_tctx_exit exit; 2760 struct io_tctx_node *node; 2761 int ret; 2762 2763 /* 2764 * If we're doing polled IO and end up having requests being 2765 * submitted async (out-of-line), then completions can come in while 2766 * we're waiting for refs to drop. We need to reap these manually, 2767 * as nobody else will be looking for them. 2768 */ 2769 do { 2770 if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) { 2771 mutex_lock(&ctx->uring_lock); 2772 io_cqring_overflow_kill(ctx); 2773 mutex_unlock(&ctx->uring_lock); 2774 } 2775 2776 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) 2777 io_move_task_work_from_local(ctx); 2778 2779 while (io_uring_try_cancel_requests(ctx, NULL, true)) 2780 cond_resched(); 2781 2782 if (ctx->sq_data) { 2783 struct io_sq_data *sqd = ctx->sq_data; 2784 struct task_struct *tsk; 2785 2786 io_sq_thread_park(sqd); 2787 tsk = sqd->thread; 2788 if (tsk && tsk->io_uring && tsk->io_uring->io_wq) 2789 io_wq_cancel_cb(tsk->io_uring->io_wq, 2790 io_cancel_ctx_cb, ctx, true); 2791 io_sq_thread_unpark(sqd); 2792 } 2793 2794 io_req_caches_free(ctx); 2795 2796 if (WARN_ON_ONCE(time_after(jiffies, timeout))) { 2797 /* there is little hope left, don't run it too often */ 2798 interval = HZ * 60; 2799 } 2800 /* 2801 * This is really an uninterruptible wait, as it has to be 2802 * complete. But it's also run from a kworker, which doesn't 2803 * take signals, so it's fine to make it interruptible. This 2804 * avoids scenarios where we knowingly can wait much longer 2805 * on completions, for example if someone does a SIGSTOP on 2806 * a task that needs to finish task_work to make this loop 2807 * complete. That's a synthetic situation that should not 2808 * cause a stuck task backtrace, and hence a potential panic 2809 * on stuck tasks if that is enabled. 
2810 */ 2811 } while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval)); 2812 2813 init_completion(&exit.completion); 2814 init_task_work(&exit.task_work, io_tctx_exit_cb); 2815 exit.ctx = ctx; 2816 2817 mutex_lock(&ctx->uring_lock); 2818 while (!list_empty(&ctx->tctx_list)) { 2819 WARN_ON_ONCE(time_after(jiffies, timeout)); 2820 2821 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, 2822 ctx_node); 2823 /* don't spin on a single task if cancellation failed */ 2824 list_rotate_left(&ctx->tctx_list); 2825 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); 2826 if (WARN_ON_ONCE(ret)) 2827 continue; 2828 2829 mutex_unlock(&ctx->uring_lock); 2830 /* 2831 * See comment above for 2832 * wait_for_completion_interruptible_timeout() on why this 2833 * wait is marked as interruptible. 2834 */ 2835 wait_for_completion_interruptible(&exit.completion); 2836 mutex_lock(&ctx->uring_lock); 2837 } 2838 mutex_unlock(&ctx->uring_lock); 2839 spin_lock(&ctx->completion_lock); 2840 spin_unlock(&ctx->completion_lock); 2841 2842 /* pairs with RCU read section in io_req_local_work_add() */ 2843 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) 2844 synchronize_rcu(); 2845 2846 io_ring_ctx_free(ctx); 2847 } 2848 2849 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) 2850 { 2851 unsigned long index; 2852 struct creds *creds; 2853 2854 mutex_lock(&ctx->uring_lock); 2855 percpu_ref_kill(&ctx->refs); 2856 xa_for_each(&ctx->personalities, index, creds) 2857 io_unregister_personality(ctx, index); 2858 mutex_unlock(&ctx->uring_lock); 2859 2860 flush_delayed_work(&ctx->fallback_work); 2861 2862 INIT_WORK(&ctx->exit_work, io_ring_exit_work); 2863 /* 2864 * Use system_unbound_wq to avoid spawning tons of event kworkers 2865 * if we're exiting a ton of rings at the same time. It just adds 2866 * noise and overhead, there's no discernable change in runtime 2867 * over using system_wq. 
2868 */ 2869 queue_work(iou_wq, &ctx->exit_work); 2870 } 2871 2872 static int io_uring_release(struct inode *inode, struct file *file) 2873 { 2874 struct io_ring_ctx *ctx = file->private_data; 2875 2876 file->private_data = NULL; 2877 io_ring_ctx_wait_and_kill(ctx); 2878 return 0; 2879 } 2880 2881 struct io_task_cancel { 2882 struct task_struct *task; 2883 bool all; 2884 }; 2885 2886 static bool io_cancel_task_cb(struct io_wq_work *work, void *data) 2887 { 2888 struct io_kiocb *req = container_of(work, struct io_kiocb, work); 2889 struct io_task_cancel *cancel = data; 2890 2891 return io_match_task_safe(req, cancel->task, cancel->all); 2892 } 2893 2894 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, 2895 struct task_struct *task, 2896 bool cancel_all) 2897 { 2898 struct io_defer_entry *de; 2899 LIST_HEAD(list); 2900 2901 spin_lock(&ctx->completion_lock); 2902 list_for_each_entry_reverse(de, &ctx->defer_list, list) { 2903 if (io_match_task_safe(de->req, task, cancel_all)) { 2904 list_cut_position(&list, &ctx->defer_list, &de->list); 2905 break; 2906 } 2907 } 2908 spin_unlock(&ctx->completion_lock); 2909 if (list_empty(&list)) 2910 return false; 2911 2912 while (!list_empty(&list)) { 2913 de = list_first_entry(&list, struct io_defer_entry, list); 2914 list_del_init(&de->list); 2915 io_req_task_queue_fail(de->req, -ECANCELED); 2916 kfree(de); 2917 } 2918 return true; 2919 } 2920 2921 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) 2922 { 2923 struct io_tctx_node *node; 2924 enum io_wq_cancel cret; 2925 bool ret = false; 2926 2927 mutex_lock(&ctx->uring_lock); 2928 list_for_each_entry(node, &ctx->tctx_list, ctx_node) { 2929 struct io_uring_task *tctx = node->task->io_uring; 2930 2931 /* 2932 * io_wq will stay alive while we hold uring_lock, because it's 2933 * killed after ctx nodes, which requires to take the lock. 2934 */ 2935 if (!tctx || !tctx->io_wq) 2936 continue; 2937 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); 2938 ret |= (cret != IO_WQ_CANCEL_NOTFOUND); 2939 } 2940 mutex_unlock(&ctx->uring_lock); 2941 2942 return ret; 2943 } 2944 2945 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, 2946 struct task_struct *task, 2947 bool cancel_all) 2948 { 2949 struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; 2950 struct io_uring_task *tctx = task ? task->io_uring : NULL; 2951 enum io_wq_cancel cret; 2952 bool ret = false; 2953 2954 /* set it so io_req_local_work_add() would wake us up */ 2955 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { 2956 atomic_set(&ctx->cq_wait_nr, 1); 2957 smp_mb(); 2958 } 2959 2960 /* failed during ring init, it couldn't have issued any requests */ 2961 if (!ctx->rings) 2962 return false; 2963 2964 if (!task) { 2965 ret |= io_uring_try_cancel_iowq(ctx); 2966 } else if (tctx && tctx->io_wq) { 2967 /* 2968 * Cancels requests of all rings, not only @ctx, but 2969 * it's fine as the task is in exit/exec. 
2970 */ 2971 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, 2972 &cancel, true); 2973 ret |= (cret != IO_WQ_CANCEL_NOTFOUND); 2974 } 2975 2976 /* SQPOLL thread does its own polling */ 2977 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || 2978 (ctx->sq_data && ctx->sq_data->thread == current)) { 2979 while (!wq_list_empty(&ctx->iopoll_list)) { 2980 io_iopoll_try_reap_events(ctx); 2981 ret = true; 2982 cond_resched(); 2983 } 2984 } 2985 2986 if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && 2987 io_allowed_defer_tw_run(ctx)) 2988 ret |= io_run_local_work(ctx, INT_MAX) > 0; 2989 ret |= io_cancel_defer_files(ctx, task, cancel_all); 2990 mutex_lock(&ctx->uring_lock); 2991 ret |= io_poll_remove_all(ctx, task, cancel_all); 2992 ret |= io_waitid_remove_all(ctx, task, cancel_all); 2993 ret |= io_futex_remove_all(ctx, task, cancel_all); 2994 ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all); 2995 mutex_unlock(&ctx->uring_lock); 2996 ret |= io_kill_timeouts(ctx, task, cancel_all); 2997 if (task) 2998 ret |= io_run_task_work() > 0; 2999 else 3000 ret |= flush_delayed_work(&ctx->fallback_work); 3001 return ret; 3002 } 3003 3004 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) 3005 { 3006 if (tracked) 3007 return atomic_read(&tctx->inflight_tracked); 3008 return percpu_counter_sum(&tctx->inflight); 3009 } 3010 3011 /* 3012 * Find any io_uring ctx that this task has registered or done IO on, and cancel 3013 * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation. 3014 */ 3015 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) 3016 { 3017 struct io_uring_task *tctx = current->io_uring; 3018 struct io_ring_ctx *ctx; 3019 struct io_tctx_node *node; 3020 unsigned long index; 3021 s64 inflight; 3022 DEFINE_WAIT(wait); 3023 3024 WARN_ON_ONCE(sqd && sqd->thread != current); 3025 3026 if (!current->io_uring) 3027 return; 3028 if (tctx->io_wq) 3029 io_wq_exit_start(tctx->io_wq); 3030 3031 atomic_inc(&tctx->in_cancel); 3032 do { 3033 bool loop = false; 3034 3035 io_uring_drop_tctx_refs(current); 3036 if (!tctx_inflight(tctx, !cancel_all)) 3037 break; 3038 3039 /* read completions before cancelations */ 3040 inflight = tctx_inflight(tctx, false); 3041 if (!inflight) 3042 break; 3043 3044 if (!sqd) { 3045 xa_for_each(&tctx->xa, index, node) { 3046 /* sqpoll task will cancel all its requests */ 3047 if (node->ctx->sq_data) 3048 continue; 3049 loop |= io_uring_try_cancel_requests(node->ctx, 3050 current, cancel_all); 3051 } 3052 } else { 3053 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) 3054 loop |= io_uring_try_cancel_requests(ctx, 3055 current, 3056 cancel_all); 3057 } 3058 3059 if (loop) { 3060 cond_resched(); 3061 continue; 3062 } 3063 3064 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); 3065 io_run_task_work(); 3066 io_uring_drop_tctx_refs(current); 3067 xa_for_each(&tctx->xa, index, node) { 3068 if (!llist_empty(&node->ctx->work_llist)) { 3069 WARN_ON_ONCE(node->ctx->submitter_task && 3070 node->ctx->submitter_task != current); 3071 goto end_wait; 3072 } 3073 } 3074 /* 3075 * If we've seen completions, retry without waiting. This 3076 * avoids a race where a completion comes in before we did 3077 * prepare_to_wait(). 
3078 */ 3079 if (inflight == tctx_inflight(tctx, !cancel_all)) 3080 schedule(); 3081 end_wait: 3082 finish_wait(&tctx->wait, &wait); 3083 } while (1); 3084 3085 io_uring_clean_tctx(tctx); 3086 if (cancel_all) { 3087 /* 3088 * We shouldn't run task_works after cancel, so just leave 3089 * ->in_cancel set for normal exit. 3090 */ 3091 atomic_dec(&tctx->in_cancel); 3092 /* for exec all current's requests should be gone, kill tctx */ 3093 __io_uring_free(current); 3094 } 3095 } 3096 3097 void __io_uring_cancel(bool cancel_all) 3098 { 3099 io_uring_cancel_generic(cancel_all, NULL); 3100 } 3101 3102 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz) 3103 { 3104 if (flags & IORING_ENTER_EXT_ARG) { 3105 struct io_uring_getevents_arg arg; 3106 3107 if (argsz != sizeof(arg)) 3108 return -EINVAL; 3109 if (copy_from_user(&arg, argp, sizeof(arg))) 3110 return -EFAULT; 3111 } 3112 return 0; 3113 } 3114 3115 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz, 3116 struct __kernel_timespec __user **ts, 3117 const sigset_t __user **sig) 3118 { 3119 struct io_uring_getevents_arg arg; 3120 3121 /* 3122 * If EXT_ARG isn't set, then we have no timespec and the argp pointer 3123 * is just a pointer to the sigset_t. 3124 */ 3125 if (!(flags & IORING_ENTER_EXT_ARG)) { 3126 *sig = (const sigset_t __user *) argp; 3127 *ts = NULL; 3128 return 0; 3129 } 3130 3131 /* 3132 * EXT_ARG is set - ensure we agree on the size of it and copy in our 3133 * timespec and sigset_t pointers if good. 3134 */ 3135 if (*argsz != sizeof(arg)) 3136 return -EINVAL; 3137 if (copy_from_user(&arg, argp, sizeof(arg))) 3138 return -EFAULT; 3139 if (arg.pad) 3140 return -EINVAL; 3141 *sig = u64_to_user_ptr(arg.sigmask); 3142 *argsz = arg.sigmask_sz; 3143 *ts = u64_to_user_ptr(arg.ts); 3144 return 0; 3145 } 3146 3147 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, 3148 u32, min_complete, u32, flags, const void __user *, argp, 3149 size_t, argsz) 3150 { 3151 struct io_ring_ctx *ctx; 3152 struct file *file; 3153 long ret; 3154 3155 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | 3156 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | 3157 IORING_ENTER_REGISTERED_RING))) 3158 return -EINVAL; 3159 3160 /* 3161 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we 3162 * need only dereference our task private array to find it. 3163 */ 3164 if (flags & IORING_ENTER_REGISTERED_RING) { 3165 struct io_uring_task *tctx = current->io_uring; 3166 3167 if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) 3168 return -EINVAL; 3169 fd = array_index_nospec(fd, IO_RINGFD_REG_MAX); 3170 file = tctx->registered_rings[fd]; 3171 if (unlikely(!file)) 3172 return -EBADF; 3173 } else { 3174 file = fget(fd); 3175 if (unlikely(!file)) 3176 return -EBADF; 3177 ret = -EOPNOTSUPP; 3178 if (unlikely(!io_is_uring_fops(file))) 3179 goto out; 3180 } 3181 3182 ctx = file->private_data; 3183 ret = -EBADFD; 3184 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) 3185 goto out; 3186 3187 /* 3188 * For SQ polling, the thread will do all submissions and completions. 3189 * Just return the requested submit count, and wake the thread if 3190 * we were asked to. 
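	 *
	 * Purely as an illustrative sketch of the userspace side (not part
	 * of this file): an SQPOLL application typically only enters the
	 * kernel once the poll thread has gone idle, e.g.
	 *
	 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
	 *		io_uring_enter(ring_fd, to_submit, 0,
	 *			       IORING_ENTER_SQ_WAKEUP, NULL, 0);
	 *
	 * where io_uring_enter() stands in for the raw system call.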
3191 */ 3192 ret = 0; 3193 if (ctx->flags & IORING_SETUP_SQPOLL) { 3194 if (unlikely(ctx->sq_data->thread == NULL)) { 3195 ret = -EOWNERDEAD; 3196 goto out; 3197 } 3198 if (flags & IORING_ENTER_SQ_WAKEUP) 3199 wake_up(&ctx->sq_data->wait); 3200 if (flags & IORING_ENTER_SQ_WAIT) 3201 io_sqpoll_wait_sq(ctx); 3202 3203 ret = to_submit; 3204 } else if (to_submit) { 3205 ret = io_uring_add_tctx_node(ctx); 3206 if (unlikely(ret)) 3207 goto out; 3208 3209 mutex_lock(&ctx->uring_lock); 3210 ret = io_submit_sqes(ctx, to_submit); 3211 if (ret != to_submit) { 3212 mutex_unlock(&ctx->uring_lock); 3213 goto out; 3214 } 3215 if (flags & IORING_ENTER_GETEVENTS) { 3216 if (ctx->syscall_iopoll) 3217 goto iopoll_locked; 3218 /* 3219 * Ignore errors, we'll soon call io_cqring_wait() and 3220 * it should handle ownership problems if any. 3221 */ 3222 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) 3223 (void)io_run_local_work_locked(ctx, min_complete); 3224 } 3225 mutex_unlock(&ctx->uring_lock); 3226 } 3227 3228 if (flags & IORING_ENTER_GETEVENTS) { 3229 int ret2; 3230 3231 if (ctx->syscall_iopoll) { 3232 /* 3233 * We disallow the app entering submit/complete with 3234 * polling, but we still need to lock the ring to 3235 * prevent racing with polled issue that got punted to 3236 * a workqueue. 3237 */ 3238 mutex_lock(&ctx->uring_lock); 3239 iopoll_locked: 3240 ret2 = io_validate_ext_arg(flags, argp, argsz); 3241 if (likely(!ret2)) { 3242 min_complete = min(min_complete, 3243 ctx->cq_entries); 3244 ret2 = io_iopoll_check(ctx, min_complete); 3245 } 3246 mutex_unlock(&ctx->uring_lock); 3247 } else { 3248 const sigset_t __user *sig; 3249 struct __kernel_timespec __user *ts; 3250 3251 ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig); 3252 if (likely(!ret2)) { 3253 min_complete = min(min_complete, 3254 ctx->cq_entries); 3255 ret2 = io_cqring_wait(ctx, min_complete, sig, 3256 argsz, ts); 3257 } 3258 } 3259 3260 if (!ret) { 3261 ret = ret2; 3262 3263 /* 3264 * EBADR indicates that one or more CQE were dropped. 3265 * Once the user has been informed we can clear the bit 3266 * as they are obviously ok with those drops. 
3267 */ 3268 if (unlikely(ret2 == -EBADR)) 3269 clear_bit(IO_CHECK_CQ_DROPPED_BIT, 3270 &ctx->check_cq); 3271 } 3272 } 3273 out: 3274 if (!(flags & IORING_ENTER_REGISTERED_RING)) 3275 fput(file); 3276 return ret; 3277 } 3278 3279 static const struct file_operations io_uring_fops = { 3280 .release = io_uring_release, 3281 .mmap = io_uring_mmap, 3282 .get_unmapped_area = io_uring_get_unmapped_area, 3283 #ifndef CONFIG_MMU 3284 .mmap_capabilities = io_uring_nommu_mmap_capabilities, 3285 #endif 3286 .poll = io_uring_poll, 3287 #ifdef CONFIG_PROC_FS 3288 .show_fdinfo = io_uring_show_fdinfo, 3289 #endif 3290 }; 3291 3292 bool io_is_uring_fops(struct file *file) 3293 { 3294 return file->f_op == &io_uring_fops; 3295 } 3296 3297 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, 3298 struct io_uring_params *p) 3299 { 3300 struct io_rings *rings; 3301 size_t size, sq_array_offset; 3302 void *ptr; 3303 3304 /* make sure these are sane, as we already accounted them */ 3305 ctx->sq_entries = p->sq_entries; 3306 ctx->cq_entries = p->cq_entries; 3307 3308 size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset); 3309 if (size == SIZE_MAX) 3310 return -EOVERFLOW; 3311 3312 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3313 rings = io_pages_map(&ctx->ring_pages, &ctx->n_ring_pages, size); 3314 else 3315 rings = io_rings_map(ctx, p->cq_off.user_addr, size); 3316 3317 if (IS_ERR(rings)) 3318 return PTR_ERR(rings); 3319 3320 ctx->rings = rings; 3321 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) 3322 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); 3323 rings->sq_ring_mask = p->sq_entries - 1; 3324 rings->cq_ring_mask = p->cq_entries - 1; 3325 rings->sq_ring_entries = p->sq_entries; 3326 rings->cq_ring_entries = p->cq_entries; 3327 3328 if (p->flags & IORING_SETUP_SQE128) 3329 size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries); 3330 else 3331 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); 3332 if (size == SIZE_MAX) { 3333 io_rings_free(ctx); 3334 return -EOVERFLOW; 3335 } 3336 3337 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3338 ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size); 3339 else 3340 ptr = io_sqes_map(ctx, p->sq_off.user_addr, size); 3341 3342 if (IS_ERR(ptr)) { 3343 io_rings_free(ctx); 3344 return PTR_ERR(ptr); 3345 } 3346 3347 ctx->sq_sqes = ptr; 3348 return 0; 3349 } 3350 3351 static int io_uring_install_fd(struct file *file) 3352 { 3353 int fd; 3354 3355 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); 3356 if (fd < 0) 3357 return fd; 3358 fd_install(fd, file); 3359 return fd; 3360 } 3361 3362 /* 3363 * Allocate an anonymous fd, this is what constitutes the application 3364 * visible backing of an io_uring instance. The application mmaps this 3365 * fd to gain access to the SQ/CQ ring details. 3366 */ 3367 static struct file *io_uring_get_file(struct io_ring_ctx *ctx) 3368 { 3369 /* Create a new inode so that the LSM can block the creation. 
	 */
	return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
					 O_RDWR | O_CLOEXEC, NULL);
}

static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
				  struct io_uring_params __user *params)
{
	struct io_ring_ctx *ctx;
	struct io_uring_task *tctx;
	struct file *file;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
	    && !(p->flags & IORING_SETUP_NO_MMAP))
		return -EINVAL;

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (!p->cq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx)
		return -ENOMEM;

	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
	    !(ctx->flags & IORING_SETUP_IOPOLL) &&
	    !(ctx->flags & IORING_SETUP_SQPOLL))
		ctx->task_complete = true;

	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
		ctx->lockless_cq = true;

	/*
	 * lazy poll_wq activation relies on ->task_complete for synchronisation
	 * purposes, see io_activate_pollwq()
	 */
	if (!ctx->task_complete)
		ctx->poll_activated = true;

	/*
	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space
	 * applications don't need to poll for completion events themselves;
	 * they can rely on io_sq_thread to do that polling work, which
	 * reduces cpu usage and uring_lock contention.
	 */
	if (ctx->flags & IORING_SETUP_IOPOLL &&
	    !(ctx->flags & IORING_SETUP_SQPOLL))
		ctx->syscall_iopoll = 1;

	ctx->compat = in_compat_syscall();
	if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
		ctx->user = get_uid(current_user());

	/*
	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
3459 */ 3460 ret = -EINVAL; 3461 if (ctx->flags & IORING_SETUP_SQPOLL) { 3462 /* IPI related flags don't make sense with SQPOLL */ 3463 if (ctx->flags & (IORING_SETUP_COOP_TASKRUN | 3464 IORING_SETUP_TASKRUN_FLAG | 3465 IORING_SETUP_DEFER_TASKRUN)) 3466 goto err; 3467 ctx->notify_method = TWA_SIGNAL_NO_IPI; 3468 } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) { 3469 ctx->notify_method = TWA_SIGNAL_NO_IPI; 3470 } else { 3471 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG && 3472 !(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) 3473 goto err; 3474 ctx->notify_method = TWA_SIGNAL; 3475 } 3476 3477 /* 3478 * For DEFER_TASKRUN we require the completion task to be the same as the 3479 * submission task. This implies that there is only one submitter, so enforce 3480 * that. 3481 */ 3482 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN && 3483 !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) { 3484 goto err; 3485 } 3486 3487 /* 3488 * This is just grabbed for accounting purposes. When a process exits, 3489 * the mm is exited and dropped before the files, hence we need to hang 3490 * on to this mm purely for the purposes of being able to unaccount 3491 * memory (locked/pinned vm). It's not used for anything else. 3492 */ 3493 mmgrab(current->mm); 3494 ctx->mm_account = current->mm; 3495 3496 ret = io_allocate_scq_urings(ctx, p); 3497 if (ret) 3498 goto err; 3499 3500 ret = io_sq_offload_create(ctx, p); 3501 if (ret) 3502 goto err; 3503 3504 ret = io_rsrc_init(ctx); 3505 if (ret) 3506 goto err; 3507 3508 p->sq_off.head = offsetof(struct io_rings, sq.head); 3509 p->sq_off.tail = offsetof(struct io_rings, sq.tail); 3510 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask); 3511 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries); 3512 p->sq_off.flags = offsetof(struct io_rings, sq_flags); 3513 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped); 3514 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) 3515 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings; 3516 p->sq_off.resv1 = 0; 3517 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3518 p->sq_off.user_addr = 0; 3519 3520 p->cq_off.head = offsetof(struct io_rings, cq.head); 3521 p->cq_off.tail = offsetof(struct io_rings, cq.tail); 3522 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask); 3523 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); 3524 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); 3525 p->cq_off.cqes = offsetof(struct io_rings, cqes); 3526 p->cq_off.flags = offsetof(struct io_rings, cq_flags); 3527 p->cq_off.resv1 = 0; 3528 if (!(ctx->flags & IORING_SETUP_NO_MMAP)) 3529 p->cq_off.user_addr = 0; 3530 3531 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | 3532 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | 3533 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | 3534 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED | 3535 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS | 3536 IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP | 3537 IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING | 3538 IORING_FEAT_RECVSEND_BUNDLE; 3539 3540 if (copy_to_user(params, p, sizeof(*p))) { 3541 ret = -EFAULT; 3542 goto err; 3543 } 3544 3545 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER 3546 && !(ctx->flags & IORING_SETUP_R_DISABLED)) 3547 WRITE_ONCE(ctx->submitter_task, get_task_struct(current)); 3548 3549 file = io_uring_get_file(ctx); 3550 if (IS_ERR(file)) { 3551 ret = PTR_ERR(file); 3552 goto err; 3553 } 3554 3555 ret = __io_uring_add_tctx_node(ctx); 3556 if (ret) 3557 goto err_fput; 
	tctx = current->io_uring;

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
		ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
	else
		ret = io_uring_install_fd(file);
	if (ret < 0)
		goto err_fput;

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
err_fput:
	fput(file);
	return ret;
}

/*
 * Sets up an aio uring context, and returns the fd. Applications ask for a
 * ring size; we return the actual sq/cq ring sizes (among other things) in the
 * params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
			IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY |
			IORING_SETUP_NO_SQARRAY))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

static inline bool io_uring_allowed(void)
{
	int disabled = READ_ONCE(sysctl_io_uring_disabled);
	kgid_t io_uring_group;

	if (disabled == 2)
		return false;

	if (disabled == 0 || capable(CAP_SYS_ADMIN))
		return true;

	io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
	if (!gid_valid(io_uring_group))
		return false;

	return in_group_p(io_uring_group);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	if (!io_uring_allowed())
		return -EPERM;

	return io_uring_setup(entries, params);
}

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0, __u8, opcode);
	BUILD_BUG_SQE_ELEM(1, __u8, flags);
	BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
	BUILD_BUG_SQE_ELEM(4, __s32, fd);
	BUILD_BUG_SQE_ELEM(8, __u64, off);
	BUILD_BUG_SQE_ELEM(8, __u64, addr2);
	BUILD_BUG_SQE_ELEM(8, __u32, cmd_op);
	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
	BUILD_BUG_SQE_ELEM(16, __u64, addr);
	BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32, len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int,
rw_flags); 3664 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags); 3665 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags); 3666 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events); 3667 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events); 3668 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags); 3669 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags); 3670 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags); 3671 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags); 3672 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags); 3673 BUILD_BUG_SQE_ELEM(28, __u32, open_flags); 3674 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags); 3675 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice); 3676 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags); 3677 BUILD_BUG_SQE_ELEM(28, __u32, rename_flags); 3678 BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags); 3679 BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags); 3680 BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags); 3681 BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags); 3682 BUILD_BUG_SQE_ELEM(32, __u64, user_data); 3683 BUILD_BUG_SQE_ELEM(40, __u16, buf_index); 3684 BUILD_BUG_SQE_ELEM(40, __u16, buf_group); 3685 BUILD_BUG_SQE_ELEM(42, __u16, personality); 3686 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in); 3687 BUILD_BUG_SQE_ELEM(44, __u32, file_index); 3688 BUILD_BUG_SQE_ELEM(44, __u16, addr_len); 3689 BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]); 3690 BUILD_BUG_SQE_ELEM(48, __u64, addr3); 3691 BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd); 3692 BUILD_BUG_SQE_ELEM(56, __u64, __pad2); 3693 3694 BUILD_BUG_ON(sizeof(struct io_uring_files_update) != 3695 sizeof(struct io_uring_rsrc_update)); 3696 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) > 3697 sizeof(struct io_uring_rsrc_update2)); 3698 3699 /* ->buf_index is u16 */ 3700 BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0); 3701 BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) != 3702 offsetof(struct io_uring_buf_ring, tail)); 3703 3704 /* should fit into one byte */ 3705 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); 3706 BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8)); 3707 BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS); 3708 3709 BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags)); 3710 3711 BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32)); 3712 3713 /* top 8bits are for internal use */ 3714 BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0); 3715 3716 io_uring_optable_init(); 3717 3718 /* 3719 * Allow user copy in the per-command field, which starts after the 3720 * file in io_kiocb and until the opcode field. The openat2 handling 3721 * requires copying in user memory into the io_kiocb object in that 3722 * range, and HARDENED_USERCOPY will complain if we haven't 3723 * correctly annotated this range. 3724 */ 3725 req_cachep = kmem_cache_create_usercopy("io_kiocb", 3726 sizeof(struct io_kiocb), 0, 3727 SLAB_HWCACHE_ALIGN | SLAB_PANIC | 3728 SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU, 3729 offsetof(struct io_kiocb, cmd.data), 3730 sizeof_field(struct io_kiocb, cmd.data), NULL); 3731 io_buf_cachep = KMEM_CACHE(io_buffer, 3732 SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT); 3733 3734 iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64); 3735 3736 #ifdef CONFIG_SYSCTL 3737 register_sysctl_init("kernel", kernel_io_uring_disabled_table); 3738 #endif 3739 3740 return 0; 3741 }; 3742 __initcall(io_uring_init); 3743