19f5834c8SLukas Bulwahn /* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ 22b188cc1SJens Axboe /* 32b188cc1SJens Axboe * Header file for the io_uring interface. 42b188cc1SJens Axboe * 52b188cc1SJens Axboe * Copyright (C) 2019 Jens Axboe 62b188cc1SJens Axboe * Copyright (C) 2019 Christoph Hellwig 72b188cc1SJens Axboe */ 82b188cc1SJens Axboe #ifndef LINUX_IO_URING_H 92b188cc1SJens Axboe #define LINUX_IO_URING_H 102b188cc1SJens Axboe 112b188cc1SJens Axboe #include <linux/fs.h> 122b188cc1SJens Axboe #include <linux/types.h> 139eb80340SStefan Metzmacher /* 149eb80340SStefan Metzmacher * this file is shared with liburing and that has to autodetect 159eb80340SStefan Metzmacher * if linux/time_types.h is available or not, it can 169eb80340SStefan Metzmacher * define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H 179eb80340SStefan Metzmacher * if linux/time_types.h is not available 189eb80340SStefan Metzmacher */ 199eb80340SStefan Metzmacher #ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H 2078a861b9SJens Axboe #include <linux/time_types.h> 219eb80340SStefan Metzmacher #endif 222b188cc1SJens Axboe 23e1d0c6d0SAmmar Faizi #ifdef __cplusplus 24e1d0c6d0SAmmar Faizi extern "C" { 25e1d0c6d0SAmmar Faizi #endif 26e1d0c6d0SAmmar Faizi 272b188cc1SJens Axboe /* 282b188cc1SJens Axboe * IO submission data structure (Submission Queue Entry) 292b188cc1SJens Axboe */ 302b188cc1SJens Axboe struct io_uring_sqe { 312b188cc1SJens Axboe __u8 opcode; /* type of operation for this sqe */ 326b06314cSJens Axboe __u8 flags; /* IOSQE_ flags */ 332b188cc1SJens Axboe __u16 ioprio; /* ioprio for the request */ 342b188cc1SJens Axboe __s32 fd; /* file descriptor to do IO on */ 3517f2fe35SJens Axboe union { 362b188cc1SJens Axboe __u64 off; /* offset into file */ 3717f2fe35SJens Axboe __u64 addr2; 38bdb2c48eSPavel Begunkov struct { 39ee692a21SJens Axboe __u32 cmd_op; 40bdb2c48eSPavel Begunkov __u32 __pad1; 41bdb2c48eSPavel Begunkov }; 4217f2fe35SJens Axboe }; 437d67af2cSPavel 
Begunkov union { 442b188cc1SJens Axboe __u64 addr; /* pointer to buffer or iovecs */ 457d67af2cSPavel Begunkov __u64 splice_off_in; 46a5d2f99aSBreno Leitao struct { 47a5d2f99aSBreno Leitao __u32 level; 48a5d2f99aSBreno Leitao __u32 optname; 49a5d2f99aSBreno Leitao }; 507d67af2cSPavel Begunkov }; 512b188cc1SJens Axboe __u32 len; /* buffer size or number of iovecs */ 522b188cc1SJens Axboe union { 532b188cc1SJens Axboe __kernel_rwf_t rw_flags; 54c992fe29SChristoph Hellwig __u32 fsync_flags; 555769a351SJiufei Xue __u16 poll_events; /* compatibility */ 565769a351SJiufei Xue __u32 poll32_events; /* word-reversed for BE */ 575d17b4a4SJens Axboe __u32 sync_range_flags; 580fa03c62SJens Axboe __u32 msg_flags; 595262f567SJens Axboe __u32 timeout_flags; 6017f2fe35SJens Axboe __u32 accept_flags; 6162755e35SJens Axboe __u32 cancel_flags; 6215b71abeSJens Axboe __u32 open_flags; 63eddc7ef5SJens Axboe __u32 statx_flags; 644840e418SJens Axboe __u32 fadvise_advice; 657d67af2cSPavel Begunkov __u32 splice_flags; 6680a261fdSJens Axboe __u32 rename_flags; 6714a1143bSJens Axboe __u32 unlink_flags; 68cf30da90SDmitry Kadashev __u32 hardlink_flags; 69e9621e2bSStefan Roesch __u32 xattr_flags; 70e6130ebaSJens Axboe __u32 msg_ring_flags; 719cda70f6SAnuj Gupta __u32 uring_cmd_flags; 72f31ecf67SJens Axboe __u32 waitid_flags; 73194bb58cSJens Axboe __u32 futex_flags; 74dc18b89aSJens Axboe __u32 install_fd_flags; 752b188cc1SJens Axboe }; 762b188cc1SJens Axboe __u64 user_data; /* data to be passed back at completion time */ 77ddf0322dSJens Axboe /* pack this to avoid bogus arm OABI complaints */ 78ddf0322dSJens Axboe union { 7975c6a039SJens Axboe /* index into fixed buffers, if used */ 8075c6a039SJens Axboe __u16 buf_index; 81ddf0322dSJens Axboe /* for grouped buffer selection */ 82ddf0322dSJens Axboe __u16 buf_group; 83ddf0322dSJens Axboe } __attribute__((packed)); 8475c6a039SJens Axboe /* personality to use, if used */ 8575c6a039SJens Axboe __u16 personality; 86b9445598SPavel Begunkov union { 
877d67af2cSPavel Begunkov __s32 splice_fd_in; 88b9445598SPavel Begunkov __u32 file_index; 89a5d2f99aSBreno Leitao __u32 optlen; 9006a5464bSPavel Begunkov struct { 91092aeedbSPavel Begunkov __u16 addr_len; 92b48c312bSPavel Begunkov __u16 __pad3[1]; 9306a5464bSPavel Begunkov }; 94b9445598SPavel Begunkov }; 95ee692a21SJens Axboe union { 96ee692a21SJens Axboe struct { 97e9621e2bSStefan Roesch __u64 addr3; 98e9621e2bSStefan Roesch __u64 __pad2[1]; 99edafcceeSJens Axboe }; 100a5d2f99aSBreno Leitao __u64 optval; 101ebdeb7c0SJens Axboe /* 102ee692a21SJens Axboe * If the ring is initialized with IORING_SETUP_SQE128, then 103ee692a21SJens Axboe * this field is used for 80 bytes of arbitrary command data 104ebdeb7c0SJens Axboe */ 105ee692a21SJens Axboe __u8 cmd[0]; 106ee692a21SJens Axboe }; 1072b188cc1SJens Axboe }; 1082b188cc1SJens Axboe 1091339f24bSJens Axboe /* 1101339f24bSJens Axboe * If sqe->file_index is set to this for opcodes that instantiate a new 1111339f24bSJens Axboe * direct descriptor (like openat/openat2/accept), then io_uring will allocate 1121339f24bSJens Axboe * an available direct descriptor instead of having the application pass one 1131339f24bSJens Axboe * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE 1141339f24bSJens Axboe * if the space is full. 
1151339f24bSJens Axboe */ 1161339f24bSJens Axboe #define IORING_FILE_INDEX_ALLOC (~0U) 1171339f24bSJens Axboe 1186b47ee6eSPavel Begunkov enum { 1196b47ee6eSPavel Begunkov IOSQE_FIXED_FILE_BIT, 1206b47ee6eSPavel Begunkov IOSQE_IO_DRAIN_BIT, 1216b47ee6eSPavel Begunkov IOSQE_IO_LINK_BIT, 1226b47ee6eSPavel Begunkov IOSQE_IO_HARDLINK_BIT, 1236b47ee6eSPavel Begunkov IOSQE_ASYNC_BIT, 124bcda7baaSJens Axboe IOSQE_BUFFER_SELECT_BIT, 12504c76b41SPavel Begunkov IOSQE_CQE_SKIP_SUCCESS_BIT, 1266b47ee6eSPavel Begunkov }; 1276b47ee6eSPavel Begunkov 128def596e9SJens Axboe /* 1296b06314cSJens Axboe * sqe->flags 1306b06314cSJens Axboe */ 1316b47ee6eSPavel Begunkov /* use fixed fileset */ 1326b47ee6eSPavel Begunkov #define IOSQE_FIXED_FILE (1U << IOSQE_FIXED_FILE_BIT) 1336b47ee6eSPavel Begunkov /* issue after inflight IO */ 1346b47ee6eSPavel Begunkov #define IOSQE_IO_DRAIN (1U << IOSQE_IO_DRAIN_BIT) 1356b47ee6eSPavel Begunkov /* links next sqe */ 1366b47ee6eSPavel Begunkov #define IOSQE_IO_LINK (1U << IOSQE_IO_LINK_BIT) 1376b47ee6eSPavel Begunkov /* like LINK, but stronger */ 1386b47ee6eSPavel Begunkov #define IOSQE_IO_HARDLINK (1U << IOSQE_IO_HARDLINK_BIT) 1396b47ee6eSPavel Begunkov /* always go async */ 1406b47ee6eSPavel Begunkov #define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT) 141bcda7baaSJens Axboe /* select buffer from sqe->buf_group */ 142bcda7baaSJens Axboe #define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT) 14304c76b41SPavel Begunkov /* don't post CQE if request succeeded */ 14404c76b41SPavel Begunkov #define IOSQE_CQE_SKIP_SUCCESS (1U << IOSQE_CQE_SKIP_SUCCESS_BIT) 1456b06314cSJens Axboe 1466b06314cSJens Axboe /* 147def596e9SJens Axboe * io_uring_setup() flags 148def596e9SJens Axboe */ 149def596e9SJens Axboe #define IORING_SETUP_IOPOLL (1U << 0) /* io_context is polled */ 1506c271ce2SJens Axboe #define IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */ 1516c271ce2SJens Axboe #define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */ 15233a107f0SJens Axboe #define 
IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */ 1538110c1a6SJens Axboe #define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */ 15424369c2eSPavel Begunkov #define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */ 1557e84e1c7SStefano Garzarella #define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */ 156bcbb7bf6SJens Axboe #define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */ 157e1169f06SJens Axboe /* 158e1169f06SJens Axboe * Cooperative task running. When requests complete, they often require 159e1169f06SJens Axboe * forcing the submitter to transition to the kernel to complete. If this 160e1169f06SJens Axboe * flag is set, work will be done when the task transitions anyway, rather 161e1169f06SJens Axboe * than force an inter-processor interrupt reschedule. This avoids interrupting 162e1169f06SJens Axboe * a task running in userspace, and saves an IPI. 163e1169f06SJens Axboe */ 164e1169f06SJens Axboe #define IORING_SETUP_COOP_TASKRUN (1U << 8) 165ef060ea9SJens Axboe /* 166ef060ea9SJens Axboe * If COOP_TASKRUN is set, get notified if task work is available for 167ef060ea9SJens Axboe * running and a kernel transition would be needed to run it. This sets 168ef060ea9SJens Axboe * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN. 169ef060ea9SJens Axboe */ 170ef060ea9SJens Axboe #define IORING_SETUP_TASKRUN_FLAG (1U << 9) 171ebdeb7c0SJens Axboe #define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */ 1727a51e5b4SStefan Roesch #define IORING_SETUP_CQE32 (1U << 11) /* CQEs are 32 byte */ 17397bbdc06SPavel Begunkov /* 17497bbdc06SPavel Begunkov * Only one task is allowed to submit requests 17597bbdc06SPavel Begunkov */ 17697bbdc06SPavel Begunkov #define IORING_SETUP_SINGLE_ISSUER (1U << 12) 177ebdeb7c0SJens Axboe 178c0e0d6baSDylan Yudaken /* 179c0e0d6baSDylan Yudaken * Defer running task work to get events. 
180c0e0d6baSDylan Yudaken * Rather than running bits of task work whenever the task transitions 181c0e0d6baSDylan Yudaken * try to do it just before it is needed. 182c0e0d6baSDylan Yudaken */ 183c0e0d6baSDylan Yudaken #define IORING_SETUP_DEFER_TASKRUN (1U << 13) 184c0e0d6baSDylan Yudaken 18503d89a2dSJens Axboe /* 18603d89a2dSJens Axboe * Application provides the memory for the rings 18703d89a2dSJens Axboe */ 18803d89a2dSJens Axboe #define IORING_SETUP_NO_MMAP (1U << 14) 18903d89a2dSJens Axboe 1906e76ac59SJosh Triplett /* 1916e76ac59SJosh Triplett * Register the ring fd in itself for use with 1926e76ac59SJosh Triplett * IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather 1936e76ac59SJosh Triplett * than an fd. 1946e76ac59SJosh Triplett */ 1956e76ac59SJosh Triplett #define IORING_SETUP_REGISTERED_FD_ONLY (1U << 15) 1966e76ac59SJosh Triplett 1972af89abdSPavel Begunkov /* 1982af89abdSPavel Begunkov * Removes indirection through the SQ index array. 1992af89abdSPavel Begunkov */ 2002af89abdSPavel Begunkov #define IORING_SETUP_NO_SQARRAY (1U << 16) 2012af89abdSPavel Begunkov 202cc51eaa8SDylan Yudaken enum io_uring_op { 2039e3aa61aSJens Axboe IORING_OP_NOP, 2049e3aa61aSJens Axboe IORING_OP_READV, 2059e3aa61aSJens Axboe IORING_OP_WRITEV, 2069e3aa61aSJens Axboe IORING_OP_FSYNC, 2079e3aa61aSJens Axboe IORING_OP_READ_FIXED, 2089e3aa61aSJens Axboe IORING_OP_WRITE_FIXED, 2099e3aa61aSJens Axboe IORING_OP_POLL_ADD, 2109e3aa61aSJens Axboe IORING_OP_POLL_REMOVE, 2119e3aa61aSJens Axboe IORING_OP_SYNC_FILE_RANGE, 2129e3aa61aSJens Axboe IORING_OP_SENDMSG, 2139e3aa61aSJens Axboe IORING_OP_RECVMSG, 2149e3aa61aSJens Axboe IORING_OP_TIMEOUT, 2159e3aa61aSJens Axboe IORING_OP_TIMEOUT_REMOVE, 2169e3aa61aSJens Axboe IORING_OP_ACCEPT, 2179e3aa61aSJens Axboe IORING_OP_ASYNC_CANCEL, 2189e3aa61aSJens Axboe IORING_OP_LINK_TIMEOUT, 2199e3aa61aSJens Axboe IORING_OP_CONNECT, 220d63d1b5eSJens Axboe IORING_OP_FALLOCATE, 22115b71abeSJens Axboe IORING_OP_OPENAT, 222b5dba59eSJens 
Axboe IORING_OP_CLOSE, 223d9808cebSPavel Begunkov IORING_OP_FILES_UPDATE, 224eddc7ef5SJens Axboe IORING_OP_STATX, 2253a6820f2SJens Axboe IORING_OP_READ, 2263a6820f2SJens Axboe IORING_OP_WRITE, 2274840e418SJens Axboe IORING_OP_FADVISE, 228c1ca757bSJens Axboe IORING_OP_MADVISE, 229fddafaceSJens Axboe IORING_OP_SEND, 230fddafaceSJens Axboe IORING_OP_RECV, 231cebdb986SJens Axboe IORING_OP_OPENAT2, 2323e4827b0SJens Axboe IORING_OP_EPOLL_CTL, 2337d67af2cSPavel Begunkov IORING_OP_SPLICE, 234ddf0322dSJens Axboe IORING_OP_PROVIDE_BUFFERS, 235067524e9SJens Axboe IORING_OP_REMOVE_BUFFERS, 236f2a8d5c7SPavel Begunkov IORING_OP_TEE, 23736f4fa68SJens Axboe IORING_OP_SHUTDOWN, 23880a261fdSJens Axboe IORING_OP_RENAMEAT, 23914a1143bSJens Axboe IORING_OP_UNLINKAT, 240e34a02dcSDmitry Kadashev IORING_OP_MKDIRAT, 2417a8721f8SDmitry Kadashev IORING_OP_SYMLINKAT, 242cf30da90SDmitry Kadashev IORING_OP_LINKAT, 2434f57f06cSJens Axboe IORING_OP_MSG_RING, 244e9621e2bSStefan Roesch IORING_OP_FSETXATTR, 245e9621e2bSStefan Roesch IORING_OP_SETXATTR, 246a56834e0SStefan Roesch IORING_OP_FGETXATTR, 247a56834e0SStefan Roesch IORING_OP_GETXATTR, 2481374e08eSJens Axboe IORING_OP_SOCKET, 249ee692a21SJens Axboe IORING_OP_URING_CMD, 250b48c312bSPavel Begunkov IORING_OP_SEND_ZC, 251493108d9SPavel Begunkov IORING_OP_SENDMSG_ZC, 252fc68fcdaSJens Axboe IORING_OP_READ_MULTISHOT, 253f31ecf67SJens Axboe IORING_OP_WAITID, 254194bb58cSJens Axboe IORING_OP_FUTEX_WAIT, 255194bb58cSJens Axboe IORING_OP_FUTEX_WAKE, 2568f350194SJens Axboe IORING_OP_FUTEX_WAITV, 257dc18b89aSJens Axboe IORING_OP_FIXED_FD_INSTALL, 258b4bb1900STony Solomonik IORING_OP_FTRUNCATE, 2599e3aa61aSJens Axboe 2609e3aa61aSJens Axboe /* this goes last, obviously */ 2619e3aa61aSJens Axboe IORING_OP_LAST, 2629e3aa61aSJens Axboe }; 263c992fe29SChristoph Hellwig 264c992fe29SChristoph Hellwig /* 265528ce678SMing Lei * sqe->uring_cmd_flags top 8bits aren't available for userspace 2666dcabcd3SJens Axboe * IORING_URING_CMD_FIXED use registered buffer; pass 
this flag 2679cda70f6SAnuj Gupta * along with setting sqe->buf_index. 2689cda70f6SAnuj Gupta */ 2699cda70f6SAnuj Gupta #define IORING_URING_CMD_FIXED (1U << 0) 270528ce678SMing Lei #define IORING_URING_CMD_MASK IORING_URING_CMD_FIXED 2719cda70f6SAnuj Gupta 2729cda70f6SAnuj Gupta 2739cda70f6SAnuj Gupta /* 274c992fe29SChristoph Hellwig * sqe->fsync_flags 275c992fe29SChristoph Hellwig */ 276c992fe29SChristoph Hellwig #define IORING_FSYNC_DATASYNC (1U << 0) 2772b188cc1SJens Axboe 2782b188cc1SJens Axboe /* 279a41525abSJens Axboe * sqe->timeout_flags 280a41525abSJens Axboe */ 281a41525abSJens Axboe #define IORING_TIMEOUT_ABS (1U << 0) 2829c8e11b3SPavel Begunkov #define IORING_TIMEOUT_UPDATE (1U << 1) 28350c1df2bSJens Axboe #define IORING_TIMEOUT_BOOTTIME (1U << 2) 28450c1df2bSJens Axboe #define IORING_TIMEOUT_REALTIME (1U << 3) 285f1042b6cSPavel Begunkov #define IORING_LINK_TIMEOUT_UPDATE (1U << 4) 2866224590dSPavel Begunkov #define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5) 287ea97f6c8SDavid Wei #define IORING_TIMEOUT_MULTISHOT (1U << 6) 28850c1df2bSJens Axboe #define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME) 289f1042b6cSPavel Begunkov #define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE) 290a41525abSJens Axboe /* 2917d67af2cSPavel Begunkov * sqe->splice_flags 2927d67af2cSPavel Begunkov * extends splice(2) flags 2937d67af2cSPavel Begunkov */ 2947d67af2cSPavel Begunkov #define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */ 2957d67af2cSPavel Begunkov 2967d67af2cSPavel Begunkov /* 29788e41cf9SJens Axboe * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the 29888e41cf9SJens Axboe * command flags for POLL_ADD are stored in sqe->len. 29988e41cf9SJens Axboe * 30088e41cf9SJens Axboe * IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if 30188e41cf9SJens Axboe * the poll handler will continue to report 30288e41cf9SJens Axboe * CQEs on behalf of the same SQE. 
303b69de288SJens Axboe * 304b69de288SJens Axboe * IORING_POLL_UPDATE Update existing poll request, matching 305b69de288SJens Axboe * sqe->addr as the old user_data field. 306b9ba8a44SJens Axboe * 307b9ba8a44SJens Axboe * IORING_POLL_LEVEL Level triggered poll. 30888e41cf9SJens Axboe */ 30988e41cf9SJens Axboe #define IORING_POLL_ADD_MULTI (1U << 0) 310b69de288SJens Axboe #define IORING_POLL_UPDATE_EVENTS (1U << 1) 311b69de288SJens Axboe #define IORING_POLL_UPDATE_USER_DATA (1U << 2) 312b9ba8a44SJens Axboe #define IORING_POLL_ADD_LEVEL (1U << 3) 31388e41cf9SJens Axboe 31488e41cf9SJens Axboe /* 3158e29da69SJens Axboe * ASYNC_CANCEL flags. 3168e29da69SJens Axboe * 3178e29da69SJens Axboe * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key 3184bf94615SJens Axboe * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the 3194bf94615SJens Axboe * request 'user_data' 320970f256eSJens Axboe * IORING_ASYNC_CANCEL_ANY Match any request 3217d8ca725SJens Axboe * IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor 3228165b566SJens Axboe * IORING_ASYNC_CANCEL_USERDATA Match on user_data, default for no other key 323d7b8b079SJens Axboe * IORING_ASYNC_CANCEL_OP Match request based on opcode 3248e29da69SJens Axboe */ 3258e29da69SJens Axboe #define IORING_ASYNC_CANCEL_ALL (1U << 0) 3264bf94615SJens Axboe #define IORING_ASYNC_CANCEL_FD (1U << 1) 327970f256eSJens Axboe #define IORING_ASYNC_CANCEL_ANY (1U << 2) 3287d8ca725SJens Axboe #define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3) 3298165b566SJens Axboe #define IORING_ASYNC_CANCEL_USERDATA (1U << 4) 330d7b8b079SJens Axboe #define IORING_ASYNC_CANCEL_OP (1U << 5) 3318e29da69SJens Axboe 3328e29da69SJens Axboe /* 33329c1ac23SPavel Begunkov * send/sendmsg and recv/recvmsg flags (sqe->ioprio) 3340455d4ccSJens Axboe * 3350455d4ccSJens Axboe * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send 3360455d4ccSJens Axboe * or receive and arm poll if that yields an 3370455d4ccSJens Axboe 
* -EAGAIN result, arm poll upfront and skip 3380455d4ccSJens Axboe * the initial transfer attempt. 339b3fdea6eSDylan Yudaken * 340b3fdea6eSDylan Yudaken * IORING_RECV_MULTISHOT Multishot recv. Sets IORING_CQE_F_MORE if 341b3fdea6eSDylan Yudaken * the handler will continue to report 342b3fdea6eSDylan Yudaken * CQEs on behalf of the same SQE. 34310c7d33eSPavel Begunkov * 34410c7d33eSPavel Begunkov * IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in 34510c7d33eSPavel Begunkov * the buf_index field. 346e307e669SStefan Metzmacher * 347e307e669SStefan Metzmacher * IORING_SEND_ZC_REPORT_USAGE 348e307e669SStefan Metzmacher * If set, SEND[MSG]_ZC should report 349e307e669SStefan Metzmacher * the zerocopy usage in cqe.res 350e307e669SStefan Metzmacher * for the IORING_CQE_F_NOTIF cqe. 351e307e669SStefan Metzmacher * 0 is reported if zerocopy was actually possible. 352e307e669SStefan Metzmacher * IORING_NOTIF_USAGE_ZC_COPIED if data was copied 353e307e669SStefan Metzmacher * (at least partially). 3540455d4ccSJens Axboe */ 3550455d4ccSJens Axboe #define IORING_RECVSEND_POLL_FIRST (1U << 0) 356b3fdea6eSDylan Yudaken #define IORING_RECV_MULTISHOT (1U << 1) 35710c7d33eSPavel Begunkov #define IORING_RECVSEND_FIXED_BUF (1U << 2) 358e307e669SStefan Metzmacher #define IORING_SEND_ZC_REPORT_USAGE (1U << 3) 359e307e669SStefan Metzmacher 360e307e669SStefan Metzmacher /* 361e307e669SStefan Metzmacher * cqe.res for IORING_CQE_F_NOTIF if 362e307e669SStefan Metzmacher * IORING_SEND_ZC_REPORT_USAGE was requested 363e307e669SStefan Metzmacher * 364e307e669SStefan Metzmacher * It should be treated as a flag, all other 365e307e669SStefan Metzmacher * bits of cqe.res should be treated as reserved! 
366e307e669SStefan Metzmacher */ 367e307e669SStefan Metzmacher #define IORING_NOTIF_USAGE_ZC_COPIED (1U << 31) 3680455d4ccSJens Axboe 3690455d4ccSJens Axboe /* 370390ed29bSHao Xu * accept flags stored in sqe->ioprio 371390ed29bSHao Xu */ 372390ed29bSHao Xu #define IORING_ACCEPT_MULTISHOT (1U << 0) 373390ed29bSHao Xu 374390ed29bSHao Xu /* 375e6130ebaSJens Axboe * IORING_OP_MSG_RING command types, stored in sqe->addr 376e6130ebaSJens Axboe */ 377e6130ebaSJens Axboe enum { 378e6130ebaSJens Axboe IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */ 379e6130ebaSJens Axboe IORING_MSG_SEND_FD, /* send a registered fd to another ring */ 380e6130ebaSJens Axboe }; 381e6130ebaSJens Axboe 382e6130ebaSJens Axboe /* 383e6130ebaSJens Axboe * IORING_OP_MSG_RING flags (sqe->msg_ring_flags) 384e6130ebaSJens Axboe * 385e6130ebaSJens Axboe * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not 386e6130ebaSJens Axboe * applicable for IORING_MSG_DATA, obviously. 387e6130ebaSJens Axboe */ 388e6130ebaSJens Axboe #define IORING_MSG_RING_CQE_SKIP (1U << 0) 389cbeb47a7SBreno Leitao /* Pass through the flags from sqe->file_index to cqe->flags */ 390cbeb47a7SBreno Leitao #define IORING_MSG_RING_FLAGS_PASS (1U << 1) 391e6130ebaSJens Axboe 392e6130ebaSJens Axboe /* 393dc18b89aSJens Axboe * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags) 394dc18b89aSJens Axboe * 395dc18b89aSJens Axboe * IORING_FIXED_FD_NO_CLOEXEC Don't mark the fd as O_CLOEXEC 396dc18b89aSJens Axboe */ 397dc18b89aSJens Axboe #define IORING_FIXED_FD_NO_CLOEXEC (1U << 0) 398dc18b89aSJens Axboe 399dc18b89aSJens Axboe /* 4002b188cc1SJens Axboe * IO completion data structure (Completion Queue Entry) 4012b188cc1SJens Axboe */ 4022b188cc1SJens Axboe struct io_uring_cqe { 4032b188cc1SJens Axboe __u64 user_data; /* sqe->data submission passed back */ 4042b188cc1SJens Axboe __s32 res; /* result code for this event */ 4052b188cc1SJens Axboe __u32 flags; 4067a51e5b4SStefan Roesch 4077a51e5b4SStefan Roesch 
/* 4087a51e5b4SStefan Roesch * If the ring is initialized with IORING_SETUP_CQE32, then this field 4097a51e5b4SStefan Roesch * contains 16-bytes of padding, doubling the size of the CQE. 4107a51e5b4SStefan Roesch */ 4117a51e5b4SStefan Roesch __u64 big_cqe[]; 4122b188cc1SJens Axboe }; 4132b188cc1SJens Axboe 4142b188cc1SJens Axboe /* 415bcda7baaSJens Axboe * cqe->flags 416bcda7baaSJens Axboe * 417bcda7baaSJens Axboe * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID 41888e41cf9SJens Axboe * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries 419f548a12eSJens Axboe * IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv 420b48c312bSPavel Begunkov * IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinct 421b48c312bSPavel Begunkov * them from sends. 422bcda7baaSJens Axboe */ 423bcda7baaSJens Axboe #define IORING_CQE_F_BUFFER (1U << 0) 42488e41cf9SJens Axboe #define IORING_CQE_F_MORE (1U << 1) 425f548a12eSJens Axboe #define IORING_CQE_F_SOCK_NONEMPTY (1U << 2) 426b48c312bSPavel Begunkov #define IORING_CQE_F_NOTIF (1U << 3) 427bcda7baaSJens Axboe 428bcda7baaSJens Axboe enum { 429bcda7baaSJens Axboe IORING_CQE_BUFFER_SHIFT = 16, 430bcda7baaSJens Axboe }; 431bcda7baaSJens Axboe 432bcda7baaSJens Axboe /* 4332b188cc1SJens Axboe * Magic offsets for the application to mmap the data it needs 4342b188cc1SJens Axboe */ 4352b188cc1SJens Axboe #define IORING_OFF_SQ_RING 0ULL 4362b188cc1SJens Axboe #define IORING_OFF_CQ_RING 0x8000000ULL 4372b188cc1SJens Axboe #define IORING_OFF_SQES 0x10000000ULL 438c56e022cSJens Axboe #define IORING_OFF_PBUF_RING 0x80000000ULL 439c56e022cSJens Axboe #define IORING_OFF_PBUF_SHIFT 16 440c56e022cSJens Axboe #define IORING_OFF_MMAP_MASK 0xf8000000ULL 4412b188cc1SJens Axboe 4422b188cc1SJens Axboe /* 4432b188cc1SJens Axboe * Filled with the offset for mmap(2) 4442b188cc1SJens Axboe */ 4452b188cc1SJens Axboe struct io_sqring_offsets { 4462b188cc1SJens Axboe __u32 head; 4472b188cc1SJens Axboe 
__u32 tail; 4482b188cc1SJens Axboe __u32 ring_mask; 4492b188cc1SJens Axboe __u32 ring_entries; 4502b188cc1SJens Axboe __u32 flags; 4512b188cc1SJens Axboe __u32 dropped; 4522b188cc1SJens Axboe __u32 array; 4532b188cc1SJens Axboe __u32 resv1; 45403d89a2dSJens Axboe __u64 user_addr; 4552b188cc1SJens Axboe }; 4562b188cc1SJens Axboe 4576c271ce2SJens Axboe /* 4586c271ce2SJens Axboe * sq_ring->flags 4596c271ce2SJens Axboe */ 4606c271ce2SJens Axboe #define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */ 4616d5f9049SXiaoguang Wang #define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */ 462ef060ea9SJens Axboe #define IORING_SQ_TASKRUN (1U << 2) /* task should enter the kernel */ 4636c271ce2SJens Axboe 4642b188cc1SJens Axboe struct io_cqring_offsets { 4652b188cc1SJens Axboe __u32 head; 4662b188cc1SJens Axboe __u32 tail; 4672b188cc1SJens Axboe __u32 ring_mask; 4682b188cc1SJens Axboe __u32 ring_entries; 4692b188cc1SJens Axboe __u32 overflow; 4702b188cc1SJens Axboe __u32 cqes; 4710d9b5b3aSStefano Garzarella __u32 flags; 4720d9b5b3aSStefano Garzarella __u32 resv1; 47303d89a2dSJens Axboe __u64 user_addr; 4742b188cc1SJens Axboe }; 4752b188cc1SJens Axboe 4762b188cc1SJens Axboe /* 4777e55a19cSStefano Garzarella * cq_ring->flags 4787e55a19cSStefano Garzarella */ 4797e55a19cSStefano Garzarella 4807e55a19cSStefano Garzarella /* disable eventfd notifications */ 4817e55a19cSStefano Garzarella #define IORING_CQ_EVENTFD_DISABLED (1U << 0) 4827e55a19cSStefano Garzarella 4837e55a19cSStefano Garzarella /* 4842b188cc1SJens Axboe * io_uring_enter(2) flags 4852b188cc1SJens Axboe */ 4862b188cc1SJens Axboe #define IORING_ENTER_GETEVENTS (1U << 0) 4876c271ce2SJens Axboe #define IORING_ENTER_SQ_WAKEUP (1U << 1) 48890554200SJens Axboe #define IORING_ENTER_SQ_WAIT (1U << 2) 489c73ebb68SHao Xu #define IORING_ENTER_EXT_ARG (1U << 3) 490e7a6c00dSJens Axboe #define IORING_ENTER_REGISTERED_RING (1U << 4) 4912b188cc1SJens Axboe 4922b188cc1SJens Axboe /* 4932b188cc1SJens Axboe * 
Passed in for io_uring_setup(2). Copied back with updated info on success 4942b188cc1SJens Axboe */ 4952b188cc1SJens Axboe struct io_uring_params { 4962b188cc1SJens Axboe __u32 sq_entries; 4972b188cc1SJens Axboe __u32 cq_entries; 4982b188cc1SJens Axboe __u32 flags; 4996c271ce2SJens Axboe __u32 sq_thread_cpu; 5006c271ce2SJens Axboe __u32 sq_thread_idle; 501ac90f249SJens Axboe __u32 features; 50224369c2eSPavel Begunkov __u32 wq_fd; 50324369c2eSPavel Begunkov __u32 resv[3]; 5042b188cc1SJens Axboe struct io_sqring_offsets sq_off; 5052b188cc1SJens Axboe struct io_cqring_offsets cq_off; 5062b188cc1SJens Axboe }; 5072b188cc1SJens Axboe 508edafcceeSJens Axboe /* 509ac90f249SJens Axboe * io_uring_params->features flags 510ac90f249SJens Axboe */ 511ac90f249SJens Axboe #define IORING_FEAT_SINGLE_MMAP (1U << 0) 5121d7bb1d5SJens Axboe #define IORING_FEAT_NODROP (1U << 1) 513da8c9690SJens Axboe #define IORING_FEAT_SUBMIT_STABLE (1U << 2) 514ba04291eSJens Axboe #define IORING_FEAT_RW_CUR_POS (1U << 3) 515cccf0ee8SJens Axboe #define IORING_FEAT_CUR_PERSONALITY (1U << 4) 516d7718a9dSJens Axboe #define IORING_FEAT_FAST_POLL (1U << 5) 5175769a351SJiufei Xue #define IORING_FEAT_POLL_32BITS (1U << 6) 51828cea78aSJens Axboe #define IORING_FEAT_SQPOLL_NONFIXED (1U << 7) 519c73ebb68SHao Xu #define IORING_FEAT_EXT_ARG (1U << 8) 5201c0aa1faSJens Axboe #define IORING_FEAT_NATIVE_WORKERS (1U << 9) 5219690557eSPavel Begunkov #define IORING_FEAT_RSRC_TAGS (1U << 10) 52204c76b41SPavel Begunkov #define IORING_FEAT_CQE_SKIP (1U << 11) 523c4212f3eSJens Axboe #define IORING_FEAT_LINKED_FILE (1U << 12) 5247d3fd88dSJosh Triplett #define IORING_FEAT_REG_REG_RING (1U << 13) 525ac90f249SJens Axboe 526ac90f249SJens Axboe /* 527edafcceeSJens Axboe * io_uring_register(2) opcodes and arguments 528edafcceeSJens Axboe */ 5299d4a75efSStefano Garzarella enum { 5309d4a75efSStefano Garzarella IORING_REGISTER_BUFFERS = 0, 5319d4a75efSStefano Garzarella IORING_UNREGISTER_BUFFERS = 1, 5329d4a75efSStefano Garzarella 
IORING_REGISTER_FILES = 2, 5339d4a75efSStefano Garzarella IORING_UNREGISTER_FILES = 3, 5349d4a75efSStefano Garzarella IORING_REGISTER_EVENTFD = 4, 5359d4a75efSStefano Garzarella IORING_UNREGISTER_EVENTFD = 5, 5369d4a75efSStefano Garzarella IORING_REGISTER_FILES_UPDATE = 6, 5379d4a75efSStefano Garzarella IORING_REGISTER_EVENTFD_ASYNC = 7, 5389d4a75efSStefano Garzarella IORING_REGISTER_PROBE = 8, 5399d4a75efSStefano Garzarella IORING_REGISTER_PERSONALITY = 9, 5409d4a75efSStefano Garzarella IORING_UNREGISTER_PERSONALITY = 10, 54121b55dbcSStefano Garzarella IORING_REGISTER_RESTRICTIONS = 11, 5427e84e1c7SStefano Garzarella IORING_REGISTER_ENABLE_RINGS = 12, 543992da01aSPavel Begunkov 544992da01aSPavel Begunkov /* extended with tagging */ 545992da01aSPavel Begunkov IORING_REGISTER_FILES2 = 13, 546992da01aSPavel Begunkov IORING_REGISTER_FILES_UPDATE2 = 14, 547992da01aSPavel Begunkov IORING_REGISTER_BUFFERS2 = 15, 548992da01aSPavel Begunkov IORING_REGISTER_BUFFERS_UPDATE = 16, 5499d4a75efSStefano Garzarella 550fe76421dSJens Axboe /* set/clear io-wq thread affinities */ 551fe76421dSJens Axboe IORING_REGISTER_IOWQ_AFF = 17, 552fe76421dSJens Axboe IORING_UNREGISTER_IOWQ_AFF = 18, 553fe76421dSJens Axboe 554dd47c104SEugene Syromiatnikov /* set/get max number of io-wq workers */ 5552e480058SJens Axboe IORING_REGISTER_IOWQ_MAX_WORKERS = 19, 5562e480058SJens Axboe 557e7a6c00dSJens Axboe /* register/unregister io_uring fd with the ring */ 558e7a6c00dSJens Axboe IORING_REGISTER_RING_FDS = 20, 559e7a6c00dSJens Axboe IORING_UNREGISTER_RING_FDS = 21, 560e7a6c00dSJens Axboe 561c7fb1942SJens Axboe /* register ring based provide buffer group */ 562c7fb1942SJens Axboe IORING_REGISTER_PBUF_RING = 22, 563c7fb1942SJens Axboe IORING_UNREGISTER_PBUF_RING = 23, 564c7fb1942SJens Axboe 56578a861b9SJens Axboe /* sync cancelation API */ 56678a861b9SJens Axboe IORING_REGISTER_SYNC_CANCEL = 24, 56778a861b9SJens Axboe 5686e73dffbSPavel Begunkov /* register a range of fixed file slots for automatic slot 
	   allocation */
	IORING_REGISTER_FILE_ALLOC_RANGE	= 25,

	/* return status information for a buffer group */
	IORING_REGISTER_PBUF_STATUS		= 26,

	/* set/clear busy poll settings */
	IORING_REGISTER_NAPI			= 27,
	IORING_UNREGISTER_NAPI			= 28,

	/* this goes last */
	IORING_REGISTER_LAST,

	/* flag added to the opcode to use a registered ring fd */
	IORING_REGISTER_USE_REGISTERED_RING	= 1U << 31
};

/* io-wq worker categories */
enum {
	IO_WQ_BOUND,
	IO_WQ_UNBOUND,
};

/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 /* __s32 * */ fds;
};

/*
 * Register a fully sparse file space, rather than pass in an array of all
 * -1 file descriptors.
 */
#define IORING_RSRC_REGISTER_SPARSE	(1U << 0)

/*
 * Argument for resource (file/buffer) registration.
 * NOTE(review): data/tags are presumably userspace pointers passed as
 * fixed-width __aligned_u64 for 32/64-bit compat — confirm against the
 * IORING_REGISTER_{FILES2,BUFFERS2} handlers.
 */
struct io_uring_rsrc_register {
	__u32 nr;		/* number of resources being registered */
	__u32 flags;		/* IORING_RSRC_REGISTER_* flags */
	__u64 resv2;		/* reserved, must be zero */
	__aligned_u64 data;
	__aligned_u64 tags;
};

/* Argument for updating a registered resource at a given table offset. */
struct io_uring_rsrc_update {
	__u32 offset;
	__u32 resv;		/* reserved, must be zero */
	__aligned_u64 data;
};

/* Extended resource update: like io_uring_rsrc_update, plus per-resource tags. */
struct io_uring_rsrc_update2 {
	__u32 offset;
	__u32 resv;		/* reserved, must be zero */
	__aligned_u64 data;
	__aligned_u64 tags;
	__u32 nr;		/* number of entries being updated */
	__u32 resv2;		/* reserved, must be zero */
};

/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP	(-2)

/* io_uring_probe_op->flags: set if the opcode is supported by this kernel */
#define IO_URING_OP_SUPPORTED	(1U << 0)

/* Per-opcode entry reported by IORING_REGISTER_PROBE. */
struct io_uring_probe_op {
	__u8 op;
	__u8 resv;		/* reserved, must be zero */
	__u16 flags;	/* IO_URING_OP_* flags */
	__u32 resv2;	/* reserved, must be zero */
};

/* Result layout for IORING_REGISTER_PROBE; ops[] holds ops_len entries. */
struct io_uring_probe {
	__u8 last_op;	/* last opcode supported */
	__u8 ops_len;	/* length of ops[] array below */
	__u16 resv;		/* reserved, must be zero */
	__u32 resv2[3];		/* reserved, must be zero */
	struct io_uring_probe_op ops[];	/* flexible array, sized by caller */
};

/*
 * One restriction entry for IORING_REGISTER_RESTRICTIONS; which union
 * member is valid depends on opcode (see IORING_RESTRICTION_* below).
 */
struct io_uring_restriction {
	__u16 opcode;
	union {
		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
		__u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
		__u8 sqe_flags;   /* IORING_RESTRICTION_SQE_FLAGS_* */
	};
	__u8 resv;		/* reserved, must be zero */
	__u32 resv2[3];		/* reserved, must be zero */
};

/* One entry in a provided-buffer ring. */
struct io_uring_buf {
	__u64	addr;		/* buffer address */
	__u32	len;		/* buffer length */
	__u16	bid;		/* buffer ID */
	__u16	resv;		/* reserved; overlaid by the ring tail below */
};

/*
 * Shared ring of provided buffers. The struct is a union so the ring
 * header occupies the same bytes as the first io_uring_buf entry.
 */
struct io_uring_buf_ring {
	union {
		/*
		 * To avoid spilling into more pages than we need to, the
		 * ring tail is overlaid with the io_uring_buf->resv field.
		 */
		struct {
			__u64	resv1;
			__u32	resv2;
			__u16	resv3;
			__u16	tail;
		};
		__DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
	};
};

/*
 * Flags for IORING_REGISTER_PBUF_RING.
 *
 * IOU_PBUF_RING_MMAP:	If set, kernel will allocate the memory for the ring.
 *			The application must not set a ring_addr in struct
 *			io_uring_buf_reg, instead it must subsequently call
 *			mmap(2) with the offset set as:
 *			IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
 *			to get a virtual mapping for the ring.
 */
enum {
	IOU_PBUF_RING_MMAP	= 1,
};

/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
	__u64	ring_addr;	/* ring address; must be 0 with IOU_PBUF_RING_MMAP */
	__u32	ring_entries;
	__u16	bgid;		/* buffer group ID */
	__u16	flags;		/* IOU_PBUF_RING_* flags */
	__u64	resv[3];	/* reserved, must be zero */
};

/* argument for IORING_REGISTER_PBUF_STATUS */
struct io_uring_buf_status {
	__u32	buf_group;	/* input */
	__u32	head;		/* output */
	__u32	resv[8];	/* reserved, must be zero */
};

/* argument for IORING_(UN)REGISTER_NAPI */
struct io_uring_napi {
	__u32	busy_poll_to;		/* busy poll timeout */
	__u8	prefer_busy_poll;
	__u8	pad[3];			/* padding, must be zero */
	__u64	resv;			/* reserved, must be zero */
};

/*
 * io_uring_restriction->opcode values
 */
enum {
	/* Allow an io_uring_register(2) opcode */
	IORING_RESTRICTION_REGISTER_OP		= 0,

	/* Allow an sqe opcode */
	IORING_RESTRICTION_SQE_OP		= 1,

	/* Allow sqe flags */
	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,

	/* Require sqe flags (these flags must be set on each submission) */
	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,

	IORING_RESTRICTION_LAST
};

/*
 * Extended wait arguments.
 * NOTE(review): sigmask and ts appear to be userspace pointers carried in
 * __u64 (signal mask and timespec for the wait) — confirm against the
 * io_uring_enter(2) EXT_ARG handling.
 */
struct io_uring_getevents_arg {
	__u64	sigmask;
	__u32	sigmask_sz;
	__u32	pad;		/* padding, must be zero */
	__u64	ts;
};

/*
 * Argument for IORING_REGISTER_SYNC_CANCEL
 */
struct io_uring_sync_cancel_reg {
	__u64	addr;
	__s32	fd;
	__u32	flags;
	struct __kernel_timespec	timeout;
	__u8	opcode;
	__u8	pad[7];		/* padding, must be zero */
	__u64	pad2[3];	/* padding, must be zero */
};

/*
 * Argument for IORING_REGISTER_FILE_ALLOC_RANGE
 * The range is specified as [off, off + len)
 */
struct io_uring_file_index_range {
	__u32	off;
	__u32	len;
	__u64	resv;		/* reserved, must be zero */
};

/*
 * Multishot recvmsg header.
 * NOTE(review): presumably written by the kernel at the head of the
 * buffer for multishot IORING_OP_RECVMSG completions — confirm against
 * the net.c recvmsg handling.
 */
struct io_uring_recvmsg_out {
	__u32 namelen;
	__u32 controllen;
	__u32 payloadlen;
	__u32 flags;
};

/*
 * Argument for IORING_OP_URING_CMD when file is a socket
 */
enum {
	SOCKET_URING_OP_SIOCINQ		= 0,
	SOCKET_URING_OP_SIOCOUTQ,
	SOCKET_URING_OP_GETSOCKOPT,
	SOCKET_URING_OP_SETSOCKOPT,
};

#ifdef __cplusplus
}
#endif

#endif