#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/hashtable.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>

enum {
	/*
	 * A hint to not wake right away but delay until enough task_work
	 * items are queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE		= 1,
};

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* the request is executed from poll, it should not be freed */
	IO_URING_F_MULTISHOT		= 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ			= 8,
	/* executed inline from syscall */
	IO_URING_F_INLINE		= 16,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128		= (1 << 8),
	IO_URING_F_CQE32		= (1 << 9),
	IO_URING_F_IOPOLL		= (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL		= (1 << 11),
	IO_URING_F_COMPAT		= (1 << 12),
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
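
/*
 * Illustrative sketch only, not part of this header's API: shows how the
 * ->first/->last pair above supports O(1) tail appends on the singly
 * linked work list without needing a prev pointer.
 */
static inline void io_wq_work_list_append_sketch(struct io_wq_work_list *list,
						 struct io_wq_work_node *node)
{
	node->next = NULL;
	if (list->last)
		list->last->next = node;
	else
		list->first = node;
	list->last = node;
}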

struct io_wq_work {
	struct io_wq_work_node	list;
	atomic_t		flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int			cancel_seq;
};

struct io_rsrc_data {
	unsigned int		nr;
	struct io_rsrc_node	**nodes;
};

struct io_file_table {
	struct io_rsrc_data	data;
	unsigned long		*bitmap;
	unsigned int		alloc_hint;
};

struct io_hash_bucket {
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};

struct io_mapped_region {
	struct page		**pages;
	void			*ptr;
	unsigned		nr_pages;
	unsigned		flags;
};

/*
 * Return value from io_buffer_list selection, to avoid stashing it in
 * struct io_kiocb. For legacy/classic provided buffers, keeping a reference
 * across execution contexts is fine. But for ring provided buffers, the
 * list may go away as soon as ->uring_lock is dropped. As the io_kiocb
 * persists, it's better to just keep the buffer local for those cases.
 */
struct io_br_sel {
	struct io_buffer_list	*buf_list;
	/*
	 * Some selection parts return the user address, others return an error.
	 */
	union {
		void __user	*addr;
		ssize_t		val;
	};
};

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int				cached_refs;
	const struct io_ring_ctx	*last;
	struct task_struct		*task;
	struct io_wq			*io_wq;
	struct file			*registered_rings[IO_RINGFD_REG_MAX];

	struct xarray			xa;
	struct wait_queue_head		wait;
	atomic_t			in_cancel;
	atomic_t			inflight_tracked;
	struct percpu_counter		inflight;

	struct { /* task_work */
		struct llist_head	task_list;
		struct callback_head	task_work;
	} ____cacheline_aligned_in_smp;
};

struct iou_vec {
	union {
		struct iovec	*iovec;
		struct bio_vec	*bvec;
	};
	unsigned	nr; /* number of struct iovec it can hold */
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
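
/*
 * Illustrative sketch only, not used by the core code: shows how the
 * published mask wraps the free-running CQ tail into an index into
 * ->cqes[]. The in-kernel fast path for handing out CQEs is __io_get_cqe().
 */
static inline struct io_uring_cqe *io_rings_cqe_at_sketch(struct io_rings *rings,
							  u32 tail)
{
	/* cq_ring_mask is cq_ring_entries - 1, so the AND wraps the tail */
	return &rings->cqes[tail & rings->cq_ring_mask];
}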

struct io_bpf_filter;
struct io_bpf_filters {
	refcount_t			refs; /* ref for ->bpf_filters */
	spinlock_t			lock; /* protects ->bpf_filters modifications */
	struct io_bpf_filter __rcu	**filters;
	struct rcu_head			rcu_head;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	struct io_bpf_filters	*bpf_filters;
	/* ->bpf_filters needs COW on modification */
	bool			bpf_filters_cow;
	u8			sqe_flags_allowed;
	u8			sqe_flags_required;
	/* IORING_OP_* restrictions exist */
	bool			op_registered;
	/* IORING_REGISTER_* restrictions exist */
	bool			reg_registered;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	bool			cq_flush;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};

struct io_alloc_cache {
	void			**entries;
	unsigned int		nr_cached;
	unsigned int		max_cached;
	unsigned int		elem_size;
	unsigned int		init_clear;
};
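
/*
 * Illustrative sketch only: the cache above is a small LIFO array of
 * object pointers. A get pops the most recently cached entry and a
 * failed get falls back to a regular allocation; puts push entries back
 * until ->max_cached is reached.
 */
static inline void *io_alloc_cache_get_sketch(struct io_alloc_cache *cache)
{
	if (!cache->nr_cached)
		return NULL;	/* caller falls back to a normal allocation */
	return cache->entries[--cache->nr_cached];
}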

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int		flags;
		unsigned int		drain_next: 1;
		unsigned int		op_restricted: 1;
		unsigned int		reg_restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int		task_complete: 1;
		unsigned int		lockless_cq: 1;
		unsigned int		syscall_iopoll: 1;
		unsigned int		poll_activated: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		compat: 1;
		unsigned int		iowq_limits_set : 1;

		struct task_struct	*submitter_task;
		struct io_rings		*rings;
		/* cache of ->restrictions.bpf_filters->filters */
		struct io_bpf_filter __rcu **bpf_filters;
		struct percpu_ref	refs;

		clockid_t		clockid;
		enum tk_offsets		clock_offset;

		enum task_work_notify_mode	notify_method;
		unsigned			sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		atomic_t		cancel_seq;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		bool			poll_multi_queue;
		struct list_head	iopoll_list;

		struct io_file_table	file_table;
		struct io_rsrc_data	buf_table;
		struct io_alloc_cache	node_cache;
		struct io_alloc_cache	imu_cache;

		struct io_submit_state	submit_state;

		/*
		 * Modifications are protected by ->uring_lock and ->mmap_lock.
		 * The buffer list's io mapped region should be stable once
		 * published.
		 */
		struct xarray		io_bl_xa;

		struct io_hash_table	cancel_table;
		struct io_alloc_cache	apoll_cache;
		struct io_alloc_cache	netmsg_cache;
		struct io_alloc_cache	rw_cache;
		struct io_alloc_cache	cmd_cache;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head	cancelable_uring_cmd;
		/*
		 * For hybrid IOPOLL, the runtime spent in hybrid polling,
		 * excluding scheduling time
		 */
		u64			hybrid_poll_time;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;

		void			*cq_wait_arg;
		size_t			cq_wait_size;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce between CPUs.
	 */
	struct {
		struct llist_head	work_llist;
		struct llist_head	retry_llist;
		unsigned long		check_cq;
		atomic_t		cq_wait_nr;
		atomic_t		cq_timeouts;
		struct wait_queue_head	cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		raw_spinlock_t		timeout_lock;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	spinlock_t		completion_lock;

	struct list_head	cq_overflow_list;

	struct hlist_head	waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head	futex_list;
	struct io_alloc_cache	futex_cache;
#endif

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned int		file_alloc_start;
	unsigned int		file_alloc_end;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head	poll_wq;
	struct io_restriction	restrictions;

	/* Stores zcrx object pointers of type struct io_zcrx_ifq */
	struct xarray		zcrx_ctxs;

	u32			pers_next;
	struct xarray		personalities;

	/* hashed buffered write serialization */
	struct io_wq_hash	*hash_map;

	/* Only used for accounting purposes */
	struct user_struct	*user;
	struct mm_struct	*mm_account;

	/*
	 * List of tctx nodes for this ctx, protected by tctx_lock. For
	 * cancelation purposes, nests under uring_lock.
	 */
	struct list_head	tctx_list;
	struct mutex		tctx_lock;

	/* ctx exit and cancelation */
	struct llist_head	fallback_llist;
	struct delayed_work	fallback_work;
	struct work_struct	exit_work;
	struct completion	ref_comp;

	/* io-wq management, e.g. thread count */
	u32			iowq_limits[2];

	struct callback_head	poll_wq_task_work;
	struct list_head	defer_list;
	unsigned		nr_drained;

	/* protected by ->completion_lock */
	unsigned		nr_req_allocated;

#ifdef CONFIG_NET_RX_BUSY_POLL
	struct list_head	napi_list;	/* track busy poll napi_id */
	spinlock_t		napi_lock;	/* napi_list lock */

	/* napi busy poll default timeout */
	ktime_t			napi_busy_poll_dt;
	bool			napi_prefer_busy_poll;
	u8			napi_track_mode;

	DECLARE_HASHTABLE(napi_ht, 4);
#endif

	/*
	 * Protection for resize vs mmap races - both the mmap and resize
	 * side will need to grab this lock, to prevent either side from
	 * being run concurrently with the other.
	 */
	struct mutex		mmap_lock;

	struct io_mapped_region	sq_region;
	struct io_mapped_region	ring_region;
	/* used for optimised request parameter and wait argument passing */
	struct io_mapped_region	param_region;
};
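
/*
 * Illustrative sketch only: the cached CQE fast path described in the
 * completion area above. While ->cqe_cached hasn't hit ->cqe_sentinel,
 * a free CQE can be handed out by bumping the cached pointer; refilling
 * the range is the slow path handled by __io_get_cqe().
 */
static inline struct io_uring_cqe *io_get_cached_cqe_sketch(struct io_ring_ctx *ctx)
{
	if (ctx->cqe_cached < ctx->cqe_sentinel) {
		ctx->cached_cq_tail++;
		return ctx->cqe_cached++;
	}
	return NULL;	/* slow path: set up a new cached range */
}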

/*
 * Token indicating function is called in task work context:
 * ctx->uring_lock is held and any completions generated will be flushed.
 * ONLY core io_uring.c should instantiate this struct.
 */
struct io_tw_state {
	bool cancel;
};
/* Alias to use in code that doesn't instantiate struct io_tw_state */
typedef struct io_tw_state io_tw_token_t;

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_HYBRID_IOPOLL_STATE_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_MULTISHOT_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CAN_POLL_BIT,
	REQ_F_BL_EMPTY_BIT,
	REQ_F_BL_NO_RECYCLE_BIT,
	REQ_F_BUFFERS_COMMIT_BIT,
	REQ_F_BUF_NODE_BIT,
	REQ_F_HAS_METADATA_BIT,
	REQ_F_IMPORT_BUFFER_BIT,
	REQ_F_SQE_COPIED_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))
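
/*
 * Illustrative sketch only: __REQ_F_LAST_BIT isn't a real flag, it only
 * exists so the flag space can be size-checked. A build-time assertion
 * along these lines trips if a new flag ever outgrows io_req_flags_t.
 */
static inline void io_req_flags_check_sketch(void)
{
	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(io_req_flags_t));
}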

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* every req only blocks once in hybrid poll */
	REQ_F_IOPOLL_STATE	= IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* request posts multiple completions, should be set at prep time */
	REQ_F_MULTISHOT		= IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY	= IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL		= IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
	/* buffer list was empty after selection of buffer */
	REQ_F_BL_EMPTY		= IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
	/* don't recycle provided buffers for this request */
	REQ_F_BL_NO_RECYCLE	= IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
	/* buffer ring head needs incrementing on put */
	REQ_F_BUFFERS_COMMIT	= IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
	/* buf node is valid */
	REQ_F_BUF_NODE		= IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
	/* request has read/write metadata assigned */
	REQ_F_HAS_METADATA	= IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
	/*
	 * For vectored fixed buffers, resolve iovec to registered buffers.
	 * For SEND_ZC, whether to import buffers (i.e. the first issue).
	 */
	REQ_F_IMPORT_BUFFER	= IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
	/* ->sqe_copy() has been called, if necessary */
	REQ_F_SQE_COPIED	= IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
};

struct io_tw_req {
	struct io_kiocb *req;
};

typedef void (*io_req_tw_func_t)(struct io_tw_req tw_req, io_tw_token_t tw);

struct io_task_work {
	struct llist_node	node;
	io_req_tw_func_t	func;
};

struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file	*file;
	/* each command gets 56 bytes of data */
	__u8		data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)

static inline struct io_kiocb *cmd_to_io_kiocb(void *ptr)
{
	return ptr;
}

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, it holds the selected buffer ID.
	 */
	u16				buf_index;

	unsigned			nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct io_uring_task		*tctx;

	union {
		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		struct io_rsrc_node	*buf_node;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t		apoll_events;
	};

	struct io_rsrc_node		*file_node;

	atomic_t			refs;
	bool				cancel_seq_set;

	union {
		struct io_task_work	io_task_work;
		/* For IOPOLL setup queues, with hybrid polling */
		u64			iopoll_start;
	};

	union {
		/*
		 * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
		 * poll
		 */
		struct hlist_node	hash_node;
		/* IOPOLL completion handling */
		struct list_head	iopoll_node;
		/* for private io_kiocb freeing */
		struct rcu_head		rcu_head;
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	atomic_t			poll_refs;
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;

	struct io_big_cqe {
		u64			extra1;
		u64			extra2;
	} big_cqe;
};
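
/*
 * Illustrative sketch only, not a real opcode type: io_example_cmd shows
 * the layout io_cmd_data expects. The file pointer goes first so it
 * overlays the union at the top of struct io_kiocb, the remaining fields
 * must fit within io_cmd_data, and io_kiocb_to_cmd() enforces the size
 * at build time via io_kiocb_cmd_sz_check().
 */
struct io_example_cmd {
	struct file	*file;
	__u64		user_value;
	__u32		cmd_flags;
};

static inline struct io_example_cmd *io_example_cmd_from_req(struct io_kiocb *req)
{
	return io_kiocb_to_cmd(req, struct io_example_cmd);
}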

struct io_overflow_cqe {
	struct list_head	list;
	struct io_uring_cqe	cqe;
};
#endif