#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/hashtable.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>

enum {
	/*
	 * A hint to not wake right away, but to delay until enough task_work
	 * items are queued to match the number of CQEs the task is waiting
	 * for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE = 1,
};

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER = 1,
	IO_URING_F_UNLOCKED = 2,
	/* the request is executed from poll, it should not be freed */
	IO_URING_F_MULTISHOT = 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ = 8,
	/* executed inline from syscall */
	IO_URING_F_INLINE = 16,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK = INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128 = (1 << 8),
	IO_URING_F_CQE32 = (1 << 9),
	IO_URING_F_IOPOLL = (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL = (1 << 11),
	IO_URING_F_COMPAT = (1 << 12),
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

struct io_wq_work {
	struct io_wq_work_node list;
	atomic_t flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_rsrc_data {
	unsigned int nr;
	struct io_rsrc_node **nodes;
};

struct io_file_table {
	struct io_rsrc_data data;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	struct hlist_head list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket *hbs;
	unsigned hash_bits;
};

struct io_mapped_region {
	struct page **pages;
	void *ptr;
	unsigned nr_pages;
	unsigned flags;
};

/*
 * Return value from io_buffer_list selection, to avoid stashing it in
 * struct io_kiocb. For legacy/classic provided buffers, keeping a reference
 * across execution contexts is fine. But for ring provided buffers, the
 * list may go away as soon as ->uring_lock is dropped. As the io_kiocb
 * persists, it's better to just keep the buffer local for those cases.
 */
struct io_br_sel {
	struct io_buffer_list *buf_list;
	/*
	 * Some selection paths return the user address, others return an error.
	 */
	union {
		void __user *addr;
		ssize_t val;
	};
};
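
/*
 * Usage sketch (illustrative only, names are placeholders): a buffer
 * selection helper fills in exactly one of the union members above, and its
 * caller is expected to know which one applies for that helper, e.g.:
 *
 *	sel.buf_list = bl;
 *	sel.addr = u64_to_user_ptr(buf->addr);	// success: selected user address
 *	...
 *	sel.val = -ENOBUFS;			// failure: negative error code
 */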


/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int cached_refs;
	const struct io_ring_ctx *last;
	struct task_struct *task;
	struct io_wq *io_wq;
	struct file *registered_rings[IO_RINGFD_REG_MAX];

	struct xarray xa;
	struct wait_queue_head wait;
	atomic_t in_cancel;
	atomic_t inflight_tracked;
	struct percpu_counter inflight;

	struct { /* task_work */
		struct llist_head task_list;
		struct callback_head task_work;
	} ____cacheline_aligned_in_smp;
};

struct iou_vec {
	union {
		struct iovec *iovec;
		struct bio_vec *bvec;
	};
	unsigned nr; /* number of struct iovec it can hold */
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of the
	 * cq ring, and the application controls the tail of the sq ring
	 * and the head of the cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing to
	 * a cached value).
	 *
	 * After the application has read a new SQ head value, this counter
	 * includes all submissions that were dropped while reaching the new
	 * SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing to
	 * a cached value).
	 *
	 * As completion events come in out of order, this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
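
/*
 * Illustrative sketch of how the CQ side of the shared ring is meant to be
 * consumed (userspace-style pseudocode; liburing implements this for real,
 * and the names of the pointers and barrier helpers below are placeholders):
 *
 *	head = *cq_head;
 *	tail = smp_load_acquire(cq_tail);		// written by the kernel
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_ring_mask];
 *		handle_completion(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);		// publish consumed entries
 */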

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};
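
/*
 * Rough sketch of how these restrictions gate an SQE at submission time once
 * ->registered is set (the real check lives in the io_uring core; this is
 * illustrative only):
 *
 *	if (ctx->restrictions.registered) {
 *		if (!test_bit(opcode, ctx->restrictions.sqe_op))
 *			return -EACCES;
 *		if (sqe_flags & ~ctx->restrictions.sqe_flags_allowed)
 *			return -EACCES;
 *		if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
 *		    ctx->restrictions.sqe_flags_required)
 *			return -EACCES;
 *	}
 */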

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node free_list;
	/* batch completion logic */
	struct io_wq_work_list compl_reqs;
	struct io_submit_link link;

	bool plug_started;
	bool need_plug;
	bool cq_flush;
	unsigned short submit_nr;
	struct blk_plug plug;
};

struct io_alloc_cache {
	void **entries;
	unsigned int nr_cached;
	unsigned int max_cached;
	unsigned int elem_size;
	unsigned int init_clear;
};
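
/*
 * Minimal sketch of the intended caching behaviour (the real helpers live in
 * io_uring/alloc_cache.h; this is illustrative only). Getting an object pops
 * a cached entry if one is available, otherwise it falls back to a fresh
 * allocation of ->elem_size bytes; putting an object stashes it back as long
 * as fewer than ->max_cached entries are held:
 *
 *	// get
 *	obj = cache->nr_cached ? cache->entries[--cache->nr_cached] :
 *				 kmalloc(cache->elem_size, GFP_KERNEL);
 *
 *	// put
 *	if (cache->nr_cached < cache->max_cached)
 *		cache->entries[cache->nr_cached++] = obj;
 *	else
 *		kfree(obj);
 */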

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int flags;
		unsigned int drain_next: 1;
		unsigned int restricted: 1;
		unsigned int off_timeout_used: 1;
		unsigned int drain_active: 1;
		unsigned int has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int task_complete: 1;
		unsigned int lockless_cq: 1;
		unsigned int syscall_iopoll: 1;
		unsigned int poll_activated: 1;
		unsigned int drain_disabled: 1;
		unsigned int compat: 1;
		unsigned int iowq_limits_set: 1;

		struct task_struct *submitter_task;
		struct io_rings *rings;
		struct percpu_ref refs;

		clockid_t clockid;
		enum tk_offsets clock_offset;

		enum task_work_notify_mode notify_method;
		unsigned sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32 *sq_array;
		struct io_uring_sqe *sq_sqes;
		unsigned cached_sq_head;
		unsigned sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		atomic_t cancel_seq;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		bool poll_multi_queue;
		struct io_wq_work_list iopoll_list;

		struct io_file_table file_table;
		struct io_rsrc_data buf_table;
		struct io_alloc_cache node_cache;
		struct io_alloc_cache imu_cache;

		struct io_submit_state submit_state;

		/*
		 * Modifications are protected by ->uring_lock and ->mmap_lock.
		 * The buffer list's io mapped region should be stable once
		 * published.
		 */
		struct xarray io_bl_xa;

		struct io_hash_table cancel_table;
		struct io_alloc_cache apoll_cache;
		struct io_alloc_cache netmsg_cache;
		struct io_alloc_cache rw_cache;
		struct io_alloc_cache cmd_cache;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head cancelable_uring_cmd;
		/*
		 * For hybrid IOPOLL: time spent in hybrid polling, not
		 * including scheduling time.
		 */
		u64 hybrid_poll_time;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use; once exhausted,
		 * it should go through a slower range setup, see __io_get_cqe().
		 */
		struct io_uring_cqe *cqe_cached;
		struct io_uring_cqe *cqe_sentinel;

		unsigned cached_cq_tail;
		unsigned cq_entries;
		struct io_ev_fd __rcu *io_ev_fd;

		void *cq_wait_arg;
		size_t cq_wait_size;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce between CPUs.
	 */
	struct {
		struct llist_head work_llist;
		struct llist_head retry_llist;
		unsigned long check_cq;
		atomic_t cq_wait_nr;
		atomic_t cq_timeouts;
		struct wait_queue_head cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		raw_spinlock_t timeout_lock;
		struct list_head timeout_list;
		struct list_head ltimeout_list;
		unsigned cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	spinlock_t completion_lock;

	struct list_head cq_overflow_list;

	struct hlist_head waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head futex_list;
	struct io_alloc_cache futex_cache;
#endif

	const struct cred *sq_creds; /* cred used for __io_sq_thread() */
	struct io_sq_data *sq_data; /* if using sq thread polling */

	struct wait_queue_head sqo_sq_wait;
	struct list_head sqd_list;

	unsigned int file_alloc_start;
	unsigned int file_alloc_end;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head poll_wq;
	struct io_restriction restrictions;

	/* Stores zcrx object pointers of type struct io_zcrx_ifq */
	struct xarray zcrx_ctxs;

	u32 pers_next;
	struct xarray personalities;

	/* hashed buffered write serialization */
	struct io_wq_hash *hash_map;

	/* Only used for accounting purposes */
	struct user_struct *user;
	struct mm_struct *mm_account;

	/*
	 * List of tctx nodes for this ctx, protected by tctx_lock. For
	 * cancelation purposes, nests under uring_lock.
	 */
	struct list_head tctx_list;
	struct mutex tctx_lock;

	/* ctx exit and cancelation */
	struct llist_head fallback_llist;
	struct delayed_work fallback_work;
	struct work_struct exit_work;
	struct completion ref_comp;

	/* io-wq management, e.g. thread count */
	u32 iowq_limits[2];

	struct callback_head poll_wq_task_work;
	struct list_head defer_list;
	unsigned nr_drained;

#ifdef CONFIG_NET_RX_BUSY_POLL
	struct list_head napi_list; /* track busy poll napi_id */
	spinlock_t napi_lock; /* napi_list lock */

	/* napi busy poll default timeout */
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
	u8 napi_track_mode;

	DECLARE_HASHTABLE(napi_ht, 4);
#endif

	/* protected by ->completion_lock */
	unsigned evfd_last_cq_tail;
	unsigned nr_req_allocated;

	/*
	 * Protection for resize vs mmap races - both the mmap and resize
	 * side will need to grab this lock, to prevent either side from
	 * being run concurrently with the other.
	 */
	struct mutex mmap_lock;

	struct io_mapped_region sq_region;
	struct io_mapped_region ring_region;
	/* used for optimised request parameter and wait argument passing */
	struct io_mapped_region param_region;
};

/*
 * Token indicating that the function is called in task-work context:
 * ctx->uring_lock is held and any completions generated will be flushed.
 * ONLY core io_uring.c should instantiate this struct.
 */
struct io_tw_state {
	bool cancel;
};
/* Alias to use in code that doesn't instantiate struct io_tw_state */
typedef struct io_tw_state io_tw_token_t;

enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT = 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_HYBRID_IOPOLL_STATE_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_MULTISHOT_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CAN_POLL_BIT,
	REQ_F_BL_EMPTY_BIT,
	REQ_F_BL_NO_RECYCLE_BIT,
	REQ_F_BUFFERS_COMMIT_BIT,
	REQ_F_BUF_NODE_BIT,
	REQ_F_HAS_METADATA_BIT,
	REQ_F_IMPORT_BUFFER_BIT,
	REQ_F_SQE_COPIED_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))
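
/*
 * For example, REQ_F_FAIL below ends up as BIT_ULL(REQ_F_FAIL_BIT), i.e. bit 8
 * of req->flags, and is tested with "req->flags & REQ_F_FAIL". The sentinel
 * __REQ_F_LAST_BIT above allows a compile-time check along the lines of
 * (illustrative sketch, not necessarily the exact check used elsewhere):
 *
 *	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(io_req_flags_t));
 */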

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP = IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL = IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT = IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT = IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* every req only blocks once in hybrid poll */
	REQ_F_IOPOLL_STATE = IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING = IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE = IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT = IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG = IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS = IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT = IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT = IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA = IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES = IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* request posts multiple completions, should be set at prep time */
	REQ_F_MULTISHOT = IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
	/* buffer list was empty after selection of buffer */
	REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
	/* don't recycle provided buffers for this request */
	REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
	/* buffer ring head needs incrementing on put */
	REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
	/* buf node is valid */
	REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
	/* request has read/write metadata assigned */
	REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
	/*
	 * For vectored fixed buffers, resolve iovec to registered buffers.
	 * For SEND_ZC, whether to import buffers (i.e. the first issue).
	 */
	REQ_F_IMPORT_BUFFER = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
	/* ->sqe_copy() has been called, if necessary */
	REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
};

struct io_tw_req {
	struct io_kiocb *req;
};

typedef void (*io_req_tw_func_t)(struct io_tw_req tw_req, io_tw_token_t tw);

struct io_task_work {
	struct llist_node node;
	io_req_tw_func_t func;
};
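
/*
 * A handler run via io_task_work has the following shape ("io_foo_tw" is a
 * made-up name; real handlers live throughout the io_uring core). Per the
 * io_tw_state comment above, it runs with ctx->uring_lock held:
 *
 *	static void io_foo_tw(struct io_tw_req tw_req, io_tw_token_t tw)
 *	{
 *		struct io_kiocb *req = tw_req.req;
 *		...
 *	}
 *
 * The function is stashed in req->io_task_work.func before the request is
 * queued onto the task's task_work list.
 */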

struct io_cqe {
	__u64 user_data;
	__s32 res;
	/* fd initially, then cflags for completion */
	union {
		__u32 flags;
		int fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file *file;
	/* each command gets 56 bytes of data */
	__u8 data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)

static inline struct io_kiocb *cmd_to_io_kiocb(void *ptr)
{
	return ptr;
}
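
/*
 * Illustrative example of the overlay scheme described above: an opcode
 * defines its private state with the file pointer first, keeps the whole
 * thing within sizeof(struct io_cmd_data), and converts at issue time with
 * io_kiocb_to_cmd() ("io_foo" is a made-up opcode, not part of this header):
 *
 *	struct io_foo {
 *		struct file *file;
 *		u64 user_value;
 *	};
 *
 *	int io_foo_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		struct io_foo *foo = io_kiocb_to_cmd(req, struct io_foo);
 *		...
 *	}
 */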

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file *file;
		struct io_cmd_data cmd;
	};

	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, it holds the ID of the selected buffer.
	 */
	u16 buf_index;

	unsigned nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t flags;

	struct io_cqe cqe;

	struct io_ring_ctx *ctx;
	struct io_uring_task *tctx;

	union {
		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer *kbuf;

		struct io_rsrc_node *buf_node;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};

	struct io_rsrc_node *file_node;

	atomic_t refs;
	bool cancel_seq_set;
	struct io_task_work io_task_work;
	union {
		/*
		 * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
		 * poll
		 */
		struct hlist_node hash_node;
		/* For IOPOLL setup queues, with hybrid polling */
		u64 iopoll_start;
		/* for private io_kiocb freeing */
		struct rcu_head rcu_head;
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll *apoll;
	/* opcode allocated if it needs to store data for async defer */
	void *async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	atomic_t poll_refs;
	struct io_kiocb *link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred *creds;
	struct io_wq_work work;

	struct io_big_cqe {
		u64 extra1;
		u64 extra2;
	} big_cqe;
};

struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};
#endif