#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/hashtable.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * task_work items queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE = 1,
};
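
/*
 * Illustrative sketch (not part of this header): a single-CQE request
 * completing out of line can pass the lazy-wake hint when queueing its
 * task_work, assuming a queueing helper along the lines of
 * __io_req_task_work_add(req, flags):
 *
 *	req->io_task_work.func = io_req_task_complete;
 *	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 *
 * With IORING_SETUP_DEFER_TASKRUN the waiting task is then only woken once
 * enough task_work is queued to satisfy its CQE wait count.
 */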

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER = 1,
	IO_URING_F_UNLOCKED = 2,
	/* the request is executed from poll, it should not be freed */
	IO_URING_F_MULTISHOT = 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ = 8,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK = INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128 = (1 << 8),
	IO_URING_F_CQE32 = (1 << 9),
	IO_URING_F_IOPOLL = (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL = (1 << 11),
	IO_URING_F_COMPAT = (1 << 12),
};
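
/*
 * Illustrative sketch (not part of this header): how an opcode's issue
 * handler typically consumes these flags. The handler and helper names are
 * made up; the common pattern is to bail with -EAGAIN on a non-blocking
 * attempt so the request is retried from io-wq (where IO_URING_F_IOWQ is
 * set instead):
 *
 *	static int io_foo_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		if ((issue_flags & IO_URING_F_NONBLOCK) &&
 *		    !foo_can_complete_without_blocking(req))
 *			return -EAGAIN;
 *		...
 *	}
 */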

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
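
/*
 * Illustrative sketch (not part of this header): this is an intrusive,
 * singly linked FIFO, so appending amounts to a tail-pointer update (the
 * real helpers live in io_uring/slist.h; this is just the gist):
 *
 *	static inline void example_wq_list_add_tail(struct io_wq_work_node *node,
 *						    struct io_wq_work_list *list)
 *	{
 *		node->next = NULL;
 *		if (!list->first)
 *			list->first = node;
 *		else
 *			list->last->next = node;
 *		list->last = node;
 *	}
 */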

struct io_wq_work {
	struct io_wq_work_node list;
	atomic_t flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};
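
/*
 * Illustrative sketch (not part of this header): fixed-file slot allocation
 * scans ->bitmap starting at ->alloc_hint so consecutive allocations tend to
 * walk forward rather than re-scanning from zero. A minimal version of that
 * lookup, assuming nr_files slots:
 *
 *	slot = find_next_zero_bit(table->bitmap, nr_files, table->alloc_hint);
 *	if (slot >= nr_files)
 *		slot = find_first_zero_bit(table->bitmap, nr_files);
 *	if (slot >= nr_files)
 *		return -ENFILE;
 *	table->alloc_hint = slot + 1;
 */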

struct io_hash_bucket {
	spinlock_t lock;
	struct hlist_head list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket *hbs;
	unsigned hash_bits;
};
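
/*
 * Illustrative sketch (not part of this header): ->hash_bits sizes the
 * bucket array as 1 << hash_bits, so a request is filed under the bucket
 * picked by hashing its user_data down to that many bits:
 *
 *	struct io_hash_bucket *hb =
 *		&table->hbs[hash_long(req->cqe.user_data, table->hash_bits)];
 *
 *	spin_lock(&hb->lock);
 *	hlist_add_head(&req->hash_node, &hb->list);
 *	spin_unlock(&hb->lock);
 */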

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int cached_refs;
	const struct io_ring_ctx *last;
	struct io_wq *io_wq;
	struct file *registered_rings[IO_RINGFD_REG_MAX];

	struct xarray xa;
	struct wait_queue_head wait;
	atomic_t in_cancel;
	atomic_t inflight_tracked;
	struct percpu_counter inflight;

	struct { /* task_work */
		struct llist_head task_list;
		struct callback_head task_work;
	} ____cacheline_aligned_in_smp;
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
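
/*
 * Illustrative sketch (not part of this header): userspace sees this same
 * layout through the IORING_OFF_CQ_RING mmap and reaps CQEs using the mask
 * above. Acquire/release ordering is shown with the kernel barrier helpers
 * for brevity, and the pointer names stand in for the mmap'ed fields
 * described by io_uring_setup(2):
 *
 *	unsigned head = *cq_head;
 *	while (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_ring_mask];
 *		handle_completion(cqe->user_data, cqe->res);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);	// publish the consumed entries
 */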

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};
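
/*
 * Illustrative sketch (not part of this header): these bitmaps are filled
 * from userspace via IORING_REGISTER_RESTRICTIONS on a ring created with
 * IORING_SETUP_R_DISABLED, before the ring is enabled. For example,
 * restricting submissions to read/write SQEs with raw io_uring_register(2)
 * calls:
 *
 *	struct io_uring_restriction res[] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_READ },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_WRITE },
 *	};
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_RESTRICTIONS, res, 2);
 *	io_uring_register(ring_fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */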

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node free_list;
	/* batch completion logic */
	struct io_wq_work_list compl_reqs;
	struct io_submit_link link;

	bool plug_started;
	bool need_plug;
	bool cq_flush;
	unsigned short submit_nr;
	struct blk_plug plug;
};

struct io_alloc_cache {
	void **entries;
	unsigned int nr_cached;
	unsigned int max_cached;
	size_t elem_size;
};
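
/*
 * Illustrative sketch (not part of this header): the cache is a small LIFO
 * array of recycled objects, so get/put boil down to index checks on
 * ->nr_cached against ->max_cached (the real helpers live in
 * io_uring/alloc_cache.h; this is only the gist):
 *
 *	// get: pop the most recently cached object, if any
 *	void *obj = cache->nr_cached ? cache->entries[--cache->nr_cached] : NULL;
 *
 *	// put: recycle the object if there is room, otherwise free it
 *	if (cache->nr_cached < cache->max_cached)
 *		cache->entries[cache->nr_cached++] = obj;
 *	else
 *		kfree(obj);
 */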

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int flags;
		unsigned int drain_next: 1;
		unsigned int restricted: 1;
		unsigned int off_timeout_used: 1;
		unsigned int drain_active: 1;
		unsigned int has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int task_complete: 1;
		unsigned int lockless_cq: 1;
		unsigned int syscall_iopoll: 1;
		unsigned int poll_activated: 1;
		unsigned int drain_disabled: 1;
		unsigned int compat: 1;
		unsigned int iowq_limits_set : 1;

		struct task_struct *submitter_task;
		struct io_rings *rings;
		struct percpu_ref refs;

		clockid_t clockid;
		enum tk_offsets clock_offset;

		enum task_work_notify_mode notify_method;
		unsigned sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32 *sq_array;
		struct io_uring_sqe *sq_sqes;
		unsigned cached_sq_head;
		unsigned sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node *rsrc_node;
		atomic_t cancel_seq;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		bool poll_multi_queue;
		struct io_wq_work_list iopoll_list;

		struct io_file_table file_table;
		struct io_mapped_ubuf **user_bufs;
		unsigned nr_user_files;
		unsigned nr_user_bufs;

		struct io_submit_state submit_state;

		struct xarray io_bl_xa;

		struct io_hash_table cancel_table_locked;
		struct io_alloc_cache apoll_cache;
		struct io_alloc_cache netmsg_cache;
		struct io_alloc_cache rw_cache;
		struct io_alloc_cache uring_cache;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head cancelable_uring_cmd;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe *cqe_cached;
		struct io_uring_cqe *cqe_sentinel;

		unsigned cached_cq_tail;
		unsigned cq_entries;
		struct io_ev_fd __rcu *io_ev_fd;
		unsigned cq_extra;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce b/w CPUs.
	 */
	struct {
		struct llist_head work_llist;
		unsigned long check_cq;
		atomic_t cq_wait_nr;
		atomic_t cq_timeouts;
		struct wait_queue_head cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t timeout_lock;
		struct list_head timeout_list;
		struct list_head ltimeout_list;
		unsigned cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	spinlock_t completion_lock;

	struct list_head io_buffers_comp;
	struct list_head cq_overflow_list;
	struct io_hash_table cancel_table;

	struct hlist_head waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head futex_list;
	struct io_alloc_cache futex_cache;
#endif

	const struct cred *sq_creds; /* cred used for __io_sq_thread() */
	struct io_sq_data *sq_data; /* if using sq thread polling */

	struct wait_queue_head sqo_sq_wait;
	struct list_head sqd_list;

	unsigned int file_alloc_start;
	unsigned int file_alloc_end;

	struct list_head io_buffers_cache;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head poll_wq;
	struct io_restriction restrictions;
	/* slow path rsrc auxiliary data, used by update/register */
	struct io_rsrc_data *file_data;
	struct io_rsrc_data *buf_data;

	/* protected by ->uring_lock */
	struct list_head rsrc_ref_list;
	struct io_alloc_cache rsrc_node_cache;
	struct wait_queue_head rsrc_quiesce_wq;
	unsigned rsrc_quiesce;

	u32 pers_next;
	struct xarray personalities;

	/* hashed buffered write serialization */
	struct io_wq_hash *hash_map;

	/* Only used for accounting purposes */
	struct user_struct *user;
	struct mm_struct *mm_account;

	/* ctx exit and cancelation */
	struct llist_head fallback_llist;
	struct delayed_work fallback_work;
	struct work_struct exit_work;
	struct list_head tctx_list;
	struct completion ref_comp;

	/* io-wq management, e.g. thread count */
	u32 iowq_limits[2];

	struct callback_head poll_wq_task_work;
	struct list_head defer_list;

	struct io_alloc_cache msg_cache;
	spinlock_t msg_lock;

#ifdef CONFIG_NET_RX_BUSY_POLL
	struct list_head napi_list; /* track busy poll napi_id */
	spinlock_t napi_lock; /* napi_list lock */

	/* napi busy poll default timeout */
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
	bool napi_enabled;

	DECLARE_HASHTABLE(napi_ht, 4);
#endif

	/* protected by ->completion_lock */
	unsigned evfd_last_cq_tail;

	/*
	 * If IORING_SETUP_NO_MMAP is used, then the below holds
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short n_ring_pages;
	unsigned short n_sqe_pages;
	struct page **ring_pages;
	struct page **sqe_pages;
};

struct io_tw_state {
};

enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT = 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CAN_POLL_BIT,
	REQ_F_BL_EMPTY_BIT,
	REQ_F_BL_NO_RECYCLE_BIT,
	REQ_F_BUFFERS_COMMIT_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))
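
/*
 * Worked example: since REQ_F_FAIL_BIT is 8, IO_REQ_FLAG(REQ_F_FAIL_BIT)
 * below evaluates to BIT_ULL(8), i.e. an io_req_flags_t with value 0x100.
 */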

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP = IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL = IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT = IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT = IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING = IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE = IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT = IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG = IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS = IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT = IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT = IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA = IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES = IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
	REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
	/* buffer list was empty after selection of buffer */
	REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
	/* don't recycle provided buffers for this request */
	REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
	/* buffer ring head needs incrementing on put */
	REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node node;
	io_req_tw_func_t func;
};

struct io_cqe {
	__u64 user_data;
	__s32 res;
	/* fd initially, then cflags for completion */
	union {
		__u32 flags;
		int fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file *file;
	/* each command gets 56 bytes of data */
	__u8 data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)
#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr)
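
/*
 * Illustrative sketch (not part of this header): an opcode implementation
 * overlays its per-request struct on ->cmd via io_kiocb_to_cmd(); the
 * BUILD_BUG_ON above rejects any type larger than io_cmd_data at compile
 * time. The struct and prep handler names here are made up:
 *
 *	struct io_foo {
 *		struct file *file;	// must be first, see io_kiocb below
 *		u32 flags;
 *	};
 *
 *	static int io_foo_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 *	{
 *		struct io_foo *foo = io_kiocb_to_cmd(req, struct io_foo);
 *
 *		foo->flags = READ_ONCE(sqe->rw_flags);
 *		return 0;
 *	}
 */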

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file *file;
		struct io_cmd_data cmd;
	};

	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16 buf_index;

	unsigned nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t flags;

	struct io_cqe cqe;

	struct io_ring_ctx *ctx;
	struct task_struct *task;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf *imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer *kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list *buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};

	struct io_rsrc_node *rsrc_node;

	atomic_t refs;
	bool cancel_seq_set;
	struct io_task_work io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node hash_node;
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll *apoll;
	/* opcode allocated if it needs to store data for async defer */
	void *async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	atomic_t poll_refs;
	struct io_kiocb *link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred *creds;
	struct io_wq_work work;

	struct {
		u64 extra1;
		u64 extra2;
	} big_cqe;
};

struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};

#endif