#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/hashtable.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * task_work items queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE			= 1,
};
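
/*
 * Illustrative sketch (not part of this header): a single-CQE request might
 * ask for the lazy wakeup behaviour when its completion task_work is queued.
 * The queueing helper named below is only an assumption for the example; the
 * flag is simply passed along with the task_work add.
 *
 *	static void example_queue_completion_tw(struct io_kiocb *req)
 *	{
 *		// Delay waking the waiting task until enough task_work has
 *		// accumulated; only honoured with IORING_SETUP_DEFER_TASKRUN.
 *		example_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 *	}
 */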

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* the request is executed from poll and must not be freed */
	IO_URING_F_MULTISHOT		= 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ			= 8,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128		= (1 << 8),
	IO_URING_F_CQE32		= (1 << 9),
	IO_URING_F_IOPOLL		= (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL		= (1 << 11),
	IO_URING_F_COMPAT		= (1 << 12),
	IO_URING_F_TASK_DEAD		= (1 << 13),
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
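
/*
 * Illustrative sketch (not part of this header): io_wq_work_list is an
 * intrusive, singly linked list tracked by first/last pointers, so tail
 * appends are O(1). A minimal append, assuming the caller provides whatever
 * locking the list needs, could look like:
 *
 *	static inline void example_wq_list_add_tail(struct io_wq_work_node *node,
 *						    struct io_wq_work_list *list)
 *	{
 *		node->next = NULL;
 *		if (!list->first)
 *			list->first = node;
 *		else
 *			list->last->next = node;
 *		list->last = node;
 *	}
 */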

struct io_wq_work {
	struct io_wq_work_node list;
	atomic_t flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_rsrc_data {
	unsigned int			nr;
	struct io_rsrc_node		**nodes;
};

struct io_file_table {
	struct io_rsrc_data data;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};
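
/*
 * Illustrative sketch (not part of this header): the table holds
 * 1 << hash_bits buckets, so a bucket for a given key can be picked by
 * hashing into that range, e.g. with hash_long() from <linux/hash.h>:
 *
 *	static inline struct io_hash_bucket *
 *	example_hash_bucket(struct io_hash_table *table, unsigned long key)
 *	{
 *		return &table->hbs[hash_long(key, table->hash_bits)];
 *	}
 */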

struct io_mapped_region {
	struct page		**pages;
	void			*ptr;
	unsigned		nr_pages;
	unsigned		flags;
};

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int				cached_refs;
	const struct io_ring_ctx	*last;
	struct task_struct		*task;
	struct io_wq			*io_wq;
	struct file			*registered_rings[IO_RINGFD_REG_MAX];

	struct xarray			xa;
	struct wait_queue_head		wait;
	atomic_t			in_cancel;
	atomic_t			inflight_tracked;
	struct percpu_counter		inflight;

	struct { /* task_work */
		struct llist_head	task_list;
		struct callback_head	task_work;
	} ____cacheline_aligned_in_smp;
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of the
	 * cq ring, and the application controls the tail of the sq ring
	 * and the head of the cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After the application reads a new SQ head value, this counter
	 * includes all submissions that were dropped before reaching the
	 * new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order, this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
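
/*
 * Illustrative sketch (not part of this header): from the application side
 * (a raw userspace loop without liburing), completions are consumed by
 * reading the CQ tail the kernel publishes, masking indices into cqes[],
 * and then advancing the CQ head. The pointers below would really be
 * derived from the mmap'ed ring plus io_cqring_offsets at setup time, and
 * handle_completion() and the barrier helpers are placeholders.
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_ring_mask];
 *
 *		handle_completion(cqe->user_data, cqe->res, cqe->flags);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);
 */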

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};
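
/*
 * Illustrative sketch (not part of this header): once restrictions are
 * registered (IORING_REGISTER_RESTRICTIONS), a submission-time check can be
 * expressed with the bitmaps and flag masks above, roughly:
 *
 *	static bool example_op_allowed(const struct io_restriction *res,
 *				       u8 opcode, u8 sqe_flags)
 *	{
 *		if (!res->registered)
 *			return true;
 *		if (!test_bit(opcode, res->sqe_op))
 *			return false;
 *		if (sqe_flags & ~res->sqe_flags_allowed)
 *			return false;
 *		return (sqe_flags & res->sqe_flags_required) ==
 *			res->sqe_flags_required;
 *	}
 */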

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	bool			cq_flush;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};

struct io_alloc_cache {
	void			**entries;
	unsigned int		nr_cached;
	unsigned int		max_cached;
	unsigned int		elem_size;
	unsigned int		init_clear;
};
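
/*
 * Illustrative sketch (not part of this header): io_alloc_cache is a small
 * LIFO of recycled objects used to avoid kmalloc/kfree in hot paths. A
 * simplified get/put pair, ignoring the init_clear handling, might look
 * like:
 *
 *	static inline void *example_cache_get(struct io_alloc_cache *cache)
 *	{
 *		if (cache->nr_cached)
 *			return cache->entries[--cache->nr_cached];
 *		return NULL;	// caller falls back to a fresh allocation
 *	}
 *
 *	static inline bool example_cache_put(struct io_alloc_cache *cache,
 *					     void *entry)
 *	{
 *		if (cache->nr_cached >= cache->max_cached)
 *			return false;	// cache full, caller frees the object
 *		cache->entries[cache->nr_cached++] = entry;
 *		return true;
 *	}
 */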

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int		flags;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int		task_complete: 1;
		unsigned int		lockless_cq: 1;
		unsigned int		syscall_iopoll: 1;
		unsigned int		poll_activated: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		compat: 1;
		unsigned int		iowq_limits_set : 1;

		struct task_struct	*submitter_task;
		struct io_rings		*rings;
		struct percpu_ref	refs;

		clockid_t		clockid;
		enum tk_offsets		clock_offset;

		enum task_work_notify_mode	notify_method;
		unsigned			sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		atomic_t		cancel_seq;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		bool			poll_multi_queue;
		struct io_wq_work_list	iopoll_list;

		struct io_file_table	file_table;
		struct io_rsrc_data	buf_table;

		struct io_submit_state	submit_state;

		/*
		 * Modifications are protected by ->uring_lock and ->mmap_lock.
		 * The flags, buf_pages and buf_nr_pages fields should be stable
		 * once published.
		 */
		struct xarray		io_bl_xa;

		struct io_hash_table	cancel_table;
		struct io_alloc_cache	apoll_cache;
		struct io_alloc_cache	netmsg_cache;
		struct io_alloc_cache	rw_cache;
		struct io_alloc_cache	uring_cache;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head	cancelable_uring_cmd;
		/*
		 * For hybrid IOPOLL: runtime spent in hybrid polling,
		 * excluding scheduling time
		 */
		u64			hybrid_poll_time;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use; once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 * (a simplified sketch follows this struct).
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		unsigned		cq_extra;

		void			*cq_wait_arg;
		size_t			cq_wait_size;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce between CPUs.
	 */
	struct {
		struct llist_head	work_llist;
		struct llist_head	retry_llist;
		unsigned long		check_cq;
		atomic_t		cq_wait_nr;
		atomic_t		cq_timeouts;
		struct wait_queue_head	cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		raw_spinlock_t		timeout_lock;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	spinlock_t		completion_lock;

	struct list_head	io_buffers_comp;
	struct list_head	cq_overflow_list;

	struct hlist_head	waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head	futex_list;
	struct io_alloc_cache	futex_cache;
#endif

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned int		file_alloc_start;
	unsigned int		file_alloc_end;

	struct list_head	io_buffers_cache;

	/* Keep this last; we don't need it for the fast path */
	struct wait_queue_head		poll_wq;
	struct io_restriction		restrictions;

	u32			pers_next;
	struct xarray		personalities;

	/* hashed buffered write serialization */
	struct io_wq_hash		*hash_map;

	/* Only used for accounting purposes */
	struct user_struct		*user;
	struct mm_struct		*mm_account;

	/* ctx exit and cancelation */
	struct llist_head		fallback_llist;
	struct delayed_work		fallback_work;
	struct work_struct		exit_work;
	struct list_head		tctx_list;
	struct completion		ref_comp;

	/* io-wq management, e.g. thread count */
	u32				iowq_limits[2];

	struct callback_head		poll_wq_task_work;
	struct list_head		defer_list;

	struct io_alloc_cache		msg_cache;
	spinlock_t			msg_lock;

#ifdef CONFIG_NET_RX_BUSY_POLL
	struct list_head	napi_list;	/* track busy poll napi_id */
	spinlock_t		napi_lock;	/* napi_list lock */

	/* napi busy poll default timeout */
	ktime_t			napi_busy_poll_dt;
	bool			napi_prefer_busy_poll;
	u8			napi_track_mode;

	DECLARE_HASHTABLE(napi_ht, 4);
#endif

	/* protected by ->completion_lock */
	unsigned			evfd_last_cq_tail;

	/*
	 * Protection for resize vs mmap races - both the mmap and resize
	 * side will need to grab this lock, to prevent either side from
	 * being run concurrently with the other.
	 */
	struct mutex			mmap_lock;

	struct io_mapped_region		sq_region;
	struct io_mapped_region		ring_region;
	/* used for optimised request parameter and wait argument passing */
	struct io_mapped_region		param_region;
};
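
/*
 * Illustrative sketch (not part of this header): the cqe_cached/cqe_sentinel
 * pair caches a contiguous range of free CQE slots so the hot path can hand
 * them out without recomputing ring indices each time. Ignoring CQE32 and
 * overflow handling, the fast path is roughly:
 *
 *	static inline struct io_uring_cqe *example_get_cqe(struct io_ring_ctx *ctx)
 *	{
 *		if (ctx->cqe_cached < ctx->cqe_sentinel) {
 *			struct io_uring_cqe *cqe = ctx->cqe_cached;
 *
 *			ctx->cached_cq_tail++;
 *			ctx->cqe_cached++;
 *			return cqe;
 *		}
 *		return NULL;	// refill the cached range via the slow path
 *	}
 */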

struct io_tw_state {
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_HYBRID_IOPOLL_STATE_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CAN_POLL_BIT,
	REQ_F_BL_EMPTY_BIT,
	REQ_F_BL_NO_RECYCLE_BIT,
	REQ_F_BUFFERS_COMMIT_BIT,
	REQ_F_BUF_NODE_BIT,
	REQ_F_HAS_METADATA_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* every req only blocks once in hybrid poll */
	REQ_F_IOPOLL_STATE	= IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY	= IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL		= IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
	/* buffer list was empty after selection of buffer */
	REQ_F_BL_EMPTY		= IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
	/* don't recycle provided buffers for this request */
	REQ_F_BL_NO_RECYCLE	= IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
	/* buffer ring head needs incrementing on put */
	REQ_F_BUFFERS_COMMIT	= IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
	/* buf node is valid */
	REQ_F_BUF_NODE		= IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
	/* request has read/write metadata assigned */
	REQ_F_HAS_METADATA	= IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
};
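
/*
 * Illustrative sketch (not part of this header): request flags form a plain
 * io_req_flags_t bitmask, so testing and setting them follows the usual
 * pattern, e.g.:
 *
 *	if (req->flags & REQ_F_FORCE_ASYNC)
 *		...	// punt to io-wq instead of issuing inline
 *	req->flags |= REQ_F_NEED_CLEANUP;
 */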

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node		node;
	io_req_tw_func_t		func;
};
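
/*
 * Illustrative sketch (not part of this header): completion work is queued
 * by pointing io_task_work.func at a callback with the io_req_tw_func_t
 * signature and adding the request to the owning task's work list; the
 * queueing helper named below is only a stand-in for the internal API.
 *
 *	static void example_req_tw(struct io_kiocb *req, struct io_tw_state *ts)
 *	{
 *		// runs in the context of the task that owns the ring
 *	}
 *
 *	req->io_task_work.func = example_req_tw;
 *	example_task_work_add(req);	// hypothetical queueing helper
 */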

struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file		*file;
	/* each command gets 56 bytes of data */
	__u8			data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)
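
/*
 * Illustrative sketch (not part of this header): an opcode defines its own
 * per-request struct, overlays it on io_kiocb.cmd via io_kiocb_to_cmd(), and
 * the BUILD_BUG_ON above guarantees it fits inside io_cmd_data. The struct
 * and prep function below are made up for the example; like the real
 * per-opcode structs, the overlay starts with the struct file pointer.
 *
 *	struct io_example_op {
 *		struct file	*file;
 *		u64		addr;
 *		u32		len;
 *	};
 *
 *	static int io_example_prep(struct io_kiocb *req,
 *				   const struct io_uring_sqe *sqe)
 *	{
 *		struct io_example_op *op = io_kiocb_to_cmd(req, struct io_example_op);
 *
 *		op->addr = READ_ONCE(sqe->addr);
 *		op->len = READ_ONCE(sqe->len);
 *		return 0;
 *	}
 */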

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;

	unsigned			nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct io_uring_task		*tctx;

	union {
		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;

		struct io_rsrc_node	*buf_node;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};

	struct io_rsrc_node		*file_node;

	atomic_t			refs;
	bool				cancel_seq_set;
	struct io_task_work		io_task_work;
	union {
		/*
		 * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
		 * poll
		 */
		struct hlist_node	hash_node;
		/* For IOPOLL setup queues, with hybrid polling */
		u64			iopoll_start;
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	atomic_t			poll_refs;
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;

	struct {
		u64			extra1;
		u64			extra2;
	} big_cqe;
};

struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};

static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx)
{
	return ctx->flags & IORING_SETUP_CQE32;
}

#endif