#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/hashtable.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * task_work items queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE			= 1,
};
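
/*
 * Illustrative sketch (not part of this header): a completion path that
 * posts exactly one CQE can pass this hint when queueing its task_work.
 * The __io_req_task_work_add() helper named here is an assumption about
 * the internal API shape:
 *
 *	req->io_task_work.func = io_req_task_complete;
 *	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 */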

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* the request is executed from poll, so it should not be freed */
	IO_URING_F_MULTISHOT		= 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ			= 8,
	/* int's last bit, sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128		= (1 << 8),
	IO_URING_F_CQE32		= (1 << 9),
	IO_URING_F_IOPOLL		= (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL		= (1 << 11),
	IO_URING_F_COMPAT		= (1 << 12),
	IO_URING_F_TASK_DEAD		= (1 << 13),
};
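
/*
 * Illustrative sketch: because IO_URING_F_NONBLOCK occupies the sign bit,
 * a handler can test it with a signed comparison instead of a bit test.
 * The two forms below are equivalent; the issue_flags name is an
 * assumption:
 *
 *	if (issue_flags & IO_URING_F_NONBLOCK)
 *		return -EAGAIN;
 *
 *	or, relying on the sign bit:
 *
 *	if ((int) issue_flags < 0)
 *		return -EAGAIN;
 */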

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
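
/*
 * Illustrative sketch: keeping both ends of this singly linked list makes
 * tail appends O(1). A minimal version of what the internal slist helpers
 * (e.g. wq_list_add_tail()) do:
 *
 *	static inline void example_list_add_tail(struct io_wq_work_node *node,
 *						 struct io_wq_work_list *list)
 *	{
 *		node->next = NULL;
 *		if (list->last)
 *			list->last->next = node;
 *		else
 *			list->first = node;
 *		list->last = node;
 *	}
 */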

struct io_wq_work {
	struct io_wq_work_node list;
	atomic_t flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_rsrc_data {
	unsigned int			nr;
	struct io_rsrc_node		**nodes;
};

struct io_file_table {
	struct io_rsrc_data data;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};
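
/*
 * Illustrative sketch: cancelation hashes a request's user_data into one
 * of the 1 << hash_bits buckets, roughly as the poll/cancel code does:
 *
 *	struct io_hash_bucket *hb =
 *		&table->hbs[hash_long(req->cqe.user_data, table->hash_bits)];
 *
 *	hlist_add_head(&req->hash_node, &hb->list);
 */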

struct io_mapped_region {
	struct page		**pages;
	void			*vmap_ptr;
	size_t			nr_pages;
};

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int				cached_refs;
	const struct io_ring_ctx	*last;
	struct task_struct		*task;
	struct io_wq			*io_wq;
	struct file			*registered_rings[IO_RINGFD_REG_MAX];

	struct xarray			xa;
	struct wait_queue_head		wait;
	atomic_t			in_cancel;
	atomic_t			inflight_tracked;
	struct percpu_counter		inflight;

	struct { /* task_work */
		struct llist_head	task_list;
		struct callback_head	task_work;
	} ____cacheline_aligned_in_smp;
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of the
	 * cq ring, and the application controls the tail of the sq ring
	 * and the head of the cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing to
	 * a cached value).
	 *
	 * After the application reads a new SQ head value, this counter
	 * includes all submissions that were dropped before reaching the
	 * new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing to
	 * a cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
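
/*
 * Illustrative sketch (userspace view, no liburing): consuming one CQE
 * from the shared CQ ring, following the masking and ownership rules in
 * the comments above. "ring" is assumed to point at the mmap'ed struct
 * io_rings; smp_load_acquire()/smp_store_release() stand in for whatever
 * acquire/release primitives the application uses:
 *
 *	unsigned head = ring->cq.head;
 *	unsigned tail = smp_load_acquire(&ring->cq.tail);
 *
 *	if (head != tail) {
 *		struct io_uring_cqe *cqe = &ring->cqes[head & ring->cq_ring_mask];
 *
 *		handle_completion(cqe);		// hypothetical consumer
 *		smp_store_release(&ring->cq.head, head + 1);
 *	}
 */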

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	bool			cq_flush;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};

struct io_alloc_cache {
	void			**entries;
	unsigned int		nr_cached;
	unsigned int		max_cached;
	size_t			elem_size;
};
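
/*
 * Illustrative sketch: the cache is a simple LIFO array of recycled
 * allocations; a get pops the top entry, a put pushes it back unless the
 * cache is already full. Roughly the shape of the io_alloc_cache helpers:
 *
 *	static inline void *example_cache_get(struct io_alloc_cache *cache)
 *	{
 *		if (cache->nr_cached)
 *			return cache->entries[--cache->nr_cached];
 *		return NULL;
 *	}
 *
 *	static inline bool example_cache_put(struct io_alloc_cache *cache,
 *					     void *entry)
 *	{
 *		if (cache->nr_cached < cache->max_cached) {
 *			cache->entries[cache->nr_cached++] = entry;
 *			return true;
 *		}
 *		return false;	// caller frees the entry itself
 *	}
 */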

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int		flags;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int		task_complete: 1;
		unsigned int		lockless_cq: 1;
		unsigned int		syscall_iopoll: 1;
		unsigned int		poll_activated: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		compat: 1;
		unsigned int		iowq_limits_set: 1;

		struct task_struct	*submitter_task;
		struct io_rings		*rings;
		struct percpu_ref	refs;

		clockid_t		clockid;
		enum tk_offsets		clock_offset;

		enum task_work_notify_mode	notify_method;
		unsigned			sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
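
		/*
		 * Illustrative sketch: fetching the next SQE through the
		 * indirection described above, roughly as the submission
		 * path does ("head" is the current SQ head, "mask" is
		 * sq_entries - 1):
		 *
		 *	u32 idx = READ_ONCE(ctx->sq_array[head & mask]);
		 *	struct io_uring_sqe *sqe = &ctx->sq_sqes[idx];
		 */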

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		atomic_t		cancel_seq;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		bool			poll_multi_queue;
		struct io_wq_work_list	iopoll_list;

		struct io_file_table	file_table;
		struct io_rsrc_data	buf_table;

		struct io_submit_state	submit_state;

		struct xarray		io_bl_xa;

		struct io_hash_table	cancel_table;
		struct io_alloc_cache	apoll_cache;
		struct io_alloc_cache	netmsg_cache;
		struct io_alloc_cache	rw_cache;
		struct io_alloc_cache	uring_cache;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head	cancelable_uring_cmd;
		/*
		 * For Hybrid IOPOLL, runtime in hybrid polling, without
		 * scheduling time
		 */
		u64			hybrid_poll_time;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		unsigned		cq_extra;

		void			*cq_wait_arg;
		size_t			cq_wait_size;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce b/w CPUs.
	 */
	struct {
		struct llist_head	work_llist;
		struct llist_head	retry_llist;
		unsigned long		check_cq;
		atomic_t		cq_wait_nr;
		atomic_t		cq_timeouts;
		struct wait_queue_head	cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		raw_spinlock_t		timeout_lock;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	spinlock_t		completion_lock;

	struct list_head	io_buffers_comp;
	struct list_head	cq_overflow_list;

	struct hlist_head	waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head	futex_list;
	struct io_alloc_cache	futex_cache;
#endif

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned int		file_alloc_start;
	unsigned int		file_alloc_end;

	struct list_head	io_buffers_cache;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head		poll_wq;
	struct io_restriction		restrictions;

	u32			pers_next;
	struct xarray		personalities;

	/* hashed buffered write serialization */
	struct io_wq_hash		*hash_map;

	/* Only used for accounting purposes */
	struct user_struct		*user;
	struct mm_struct		*mm_account;

	/* ctx exit and cancelation */
	struct llist_head		fallback_llist;
	struct delayed_work		fallback_work;
	struct work_struct		exit_work;
	struct list_head		tctx_list;
	struct completion		ref_comp;

	/* io-wq management, e.g. thread count */
	u32				iowq_limits[2];

	struct callback_head		poll_wq_task_work;
	struct list_head		defer_list;

	struct io_alloc_cache		msg_cache;
	spinlock_t			msg_lock;

#ifdef CONFIG_NET_RX_BUSY_POLL
	struct list_head	napi_list;	/* track busy poll napi_id */
	spinlock_t		napi_lock;	/* napi_list lock */

	/* napi busy poll default timeout */
	ktime_t			napi_busy_poll_dt;
	bool			napi_prefer_busy_poll;
	u8			napi_track_mode;

	DECLARE_HASHTABLE(napi_ht, 4);
#endif

	/* protected by ->completion_lock */
	unsigned			evfd_last_cq_tail;

	/*
	 * Protection for resize vs mmap races - both the mmap and resize
	 * side will need to grab this lock, to prevent either side from
	 * being run concurrently with the other.
	 */
	struct mutex			resize_lock;

	/*
	 * If IORING_SETUP_NO_MMAP is used, then the below holds
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short			n_ring_pages;
	unsigned short			n_sqe_pages;
	struct page			**ring_pages;
	struct page			**sqe_pages;

	/* used for optimised request parameter and wait argument passing */
	struct io_mapped_region		param_region;
};

struct io_tw_state {
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_HYBRID_IOPOLL_STATE_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CAN_POLL_BIT,
	REQ_F_BL_EMPTY_BIT,
	REQ_F_BL_NO_RECYCLE_BIT,
	REQ_F_BUFFERS_COMMIT_BIT,
	REQ_F_BUF_NODE_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))
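
/*
 * Illustrative sketch: IO_REQ_FLAG() widens a bit number into a 64-bit
 * flag, since the bit space above (ending at REQ_F_BUF_NODE_BIT) has
 * outgrown 32 bits. The __bitwise annotation lets sparse catch accidental
 * mixing with plain integers. For example:
 *
 *	io_req_flags_t f = REQ_F_FIXED_FILE | REQ_F_LINK;
 *
 *	if (f & REQ_F_LINK)
 *		...
 */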

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* every req only blocks once in hybrid poll */
	REQ_F_IOPOLL_STATE	= IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY	= IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL		= IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
	/* buffer list was empty after selection of buffer */
	REQ_F_BL_EMPTY		= IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
	/* don't recycle provided buffers for this request */
	REQ_F_BL_NO_RECYCLE	= IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
	/* buffer ring head needs incrementing on put */
	REQ_F_BUFFERS_COMMIT	= IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
	/* buf node is valid */
	REQ_F_BUF_NODE		= IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node		node;
	io_req_tw_func_t		func;
};

struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file		*file;
	/* each command gets 56 bytes of data */
	__u8			data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)), \
	((cmd_type *)&(req)->cmd) \
)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)
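
/*
 * Illustrative sketch: an opcode handler overlays its private struct on
 * req->cmd; io_kiocb_to_cmd() enforces the 64-byte size cap at compile
 * time via BUILD_BUG_ON(). "struct io_example_op" is hypothetical:
 *
 *	struct io_example_op {
 *		struct file	*file;	// must stay the first member
 *		u32		arg;
 *	};
 *
 *	struct io_example_op *op = io_kiocb_to_cmd(req, struct io_example_op);
 *	struct io_kiocb *back = cmd_to_io_kiocb(op);	// round-trips to req
 */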

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;

	unsigned			nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct io_uring_task		*tctx;

	union {
		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;

		struct io_rsrc_node	*buf_node;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};

	struct io_rsrc_node		*file_node;

	atomic_t			refs;
	bool				cancel_seq_set;
	struct io_task_work		io_task_work;
	union {
		/*
		 * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
		 * poll
		 */
		struct hlist_node	hash_node;
		/* For IOPOLL setup queues, with hybrid polling */
		u64			iopoll_start;
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	atomic_t			poll_refs;
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;

	struct {
		u64			extra1;
		u64			extra2;
	} big_cqe;
};

struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};

static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx)
{
	return ctx->flags & IORING_SETUP_CQE32;
}
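
/*
 * Illustrative sketch: CQE32 rings carry a second 16-byte half per entry
 * (the big_cqe extra1/extra2 fields above), so code stepping over raw CQE
 * memory must scale by the ring mode:
 *
 *	size_t cqe_bytes = sizeof(struct io_uring_cqe);
 *
 *	if (io_ctx_cqe32(ctx))
 *		cqe_bytes *= 2;	// big CQEs take two slots in the cqes[] array
 */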

#endif