19f5834c8SLukas Bulwahn /* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ 22b188cc1SJens Axboe /* 32b188cc1SJens Axboe * Header file for the io_uring interface. 42b188cc1SJens Axboe * 52b188cc1SJens Axboe * Copyright (C) 2019 Jens Axboe 62b188cc1SJens Axboe * Copyright (C) 2019 Christoph Hellwig 72b188cc1SJens Axboe */ 82b188cc1SJens Axboe #ifndef LINUX_IO_URING_H 92b188cc1SJens Axboe #define LINUX_IO_URING_H 102b188cc1SJens Axboe 112b188cc1SJens Axboe #include <linux/fs.h> 122b188cc1SJens Axboe #include <linux/types.h> 139eb80340SStefan Metzmacher /* 149eb80340SStefan Metzmacher * this file is shared with liburing and that has to autodetect 159eb80340SStefan Metzmacher * if linux/time_types.h is available or not, it can 169eb80340SStefan Metzmacher * define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H 179eb80340SStefan Metzmacher * if linux/time_types.h is not available 189eb80340SStefan Metzmacher */ 199eb80340SStefan Metzmacher #ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H 2078a861b9SJens Axboe #include <linux/time_types.h> 219eb80340SStefan Metzmacher #endif 222b188cc1SJens Axboe 23e1d0c6d0SAmmar Faizi #ifdef __cplusplus 24e1d0c6d0SAmmar Faizi extern "C" { 25e1d0c6d0SAmmar Faizi #endif 26e1d0c6d0SAmmar Faizi 272b188cc1SJens Axboe /* 282b188cc1SJens Axboe * IO submission data structure (Submission Queue Entry) 292b188cc1SJens Axboe */ 302b188cc1SJens Axboe struct io_uring_sqe { 312b188cc1SJens Axboe __u8 opcode; /* type of operation for this sqe */ 326b06314cSJens Axboe __u8 flags; /* IOSQE_ flags */ 332b188cc1SJens Axboe __u16 ioprio; /* ioprio for the request */ 342b188cc1SJens Axboe __s32 fd; /* file descriptor to do IO on */ 3517f2fe35SJens Axboe union { 362b188cc1SJens Axboe __u64 off; /* offset into file */ 3717f2fe35SJens Axboe __u64 addr2; 38bdb2c48eSPavel Begunkov struct { 39ee692a21SJens Axboe __u32 cmd_op; 40bdb2c48eSPavel Begunkov __u32 __pad1; 41bdb2c48eSPavel Begunkov }; 4217f2fe35SJens Axboe }; 437d67af2cSPavel 
Begunkov union { 442b188cc1SJens Axboe __u64 addr; /* pointer to buffer or iovecs */ 457d67af2cSPavel Begunkov __u64 splice_off_in; 467d67af2cSPavel Begunkov }; 472b188cc1SJens Axboe __u32 len; /* buffer size or number of iovecs */ 482b188cc1SJens Axboe union { 492b188cc1SJens Axboe __kernel_rwf_t rw_flags; 50c992fe29SChristoph Hellwig __u32 fsync_flags; 515769a351SJiufei Xue __u16 poll_events; /* compatibility */ 525769a351SJiufei Xue __u32 poll32_events; /* word-reversed for BE */ 535d17b4a4SJens Axboe __u32 sync_range_flags; 540fa03c62SJens Axboe __u32 msg_flags; 555262f567SJens Axboe __u32 timeout_flags; 5617f2fe35SJens Axboe __u32 accept_flags; 5762755e35SJens Axboe __u32 cancel_flags; 5815b71abeSJens Axboe __u32 open_flags; 59eddc7ef5SJens Axboe __u32 statx_flags; 604840e418SJens Axboe __u32 fadvise_advice; 617d67af2cSPavel Begunkov __u32 splice_flags; 6280a261fdSJens Axboe __u32 rename_flags; 6314a1143bSJens Axboe __u32 unlink_flags; 64cf30da90SDmitry Kadashev __u32 hardlink_flags; 65e9621e2bSStefan Roesch __u32 xattr_flags; 66e6130ebaSJens Axboe __u32 msg_ring_flags; 679cda70f6SAnuj Gupta __u32 uring_cmd_flags; 68*f31ecf67SJens Axboe __u32 waitid_flags; 692b188cc1SJens Axboe }; 702b188cc1SJens Axboe __u64 user_data; /* data to be passed back at completion time */ 71ddf0322dSJens Axboe /* pack this to avoid bogus arm OABI complaints */ 72ddf0322dSJens Axboe union { 7375c6a039SJens Axboe /* index into fixed buffers, if used */ 7475c6a039SJens Axboe __u16 buf_index; 75ddf0322dSJens Axboe /* for grouped buffer selection */ 76ddf0322dSJens Axboe __u16 buf_group; 77ddf0322dSJens Axboe } __attribute__((packed)); 7875c6a039SJens Axboe /* personality to use, if used */ 7975c6a039SJens Axboe __u16 personality; 80b9445598SPavel Begunkov union { 817d67af2cSPavel Begunkov __s32 splice_fd_in; 82b9445598SPavel Begunkov __u32 file_index; 8306a5464bSPavel Begunkov struct { 84092aeedbSPavel Begunkov __u16 addr_len; 85b48c312bSPavel Begunkov __u16 __pad3[1]; 8606a5464bSPavel 
Begunkov }; 87b9445598SPavel Begunkov }; 88ee692a21SJens Axboe union { 89ee692a21SJens Axboe struct { 90e9621e2bSStefan Roesch __u64 addr3; 91e9621e2bSStefan Roesch __u64 __pad2[1]; 92edafcceeSJens Axboe }; 93ebdeb7c0SJens Axboe /* 94ee692a21SJens Axboe * If the ring is initialized with IORING_SETUP_SQE128, then 95ee692a21SJens Axboe * this field is used for 80 bytes of arbitrary command data 96ebdeb7c0SJens Axboe */ 97ee692a21SJens Axboe __u8 cmd[0]; 98ee692a21SJens Axboe }; 992b188cc1SJens Axboe }; 1002b188cc1SJens Axboe 1011339f24bSJens Axboe /* 1021339f24bSJens Axboe * If sqe->file_index is set to this for opcodes that instantiate a new 1031339f24bSJens Axboe * direct descriptor (like openat/openat2/accept), then io_uring will allocate 1041339f24bSJens Axboe * an available direct descriptor instead of having the application pass one 1051339f24bSJens Axboe * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE 1061339f24bSJens Axboe * if the space is full. 1071339f24bSJens Axboe */ 1081339f24bSJens Axboe #define IORING_FILE_INDEX_ALLOC (~0U) 1091339f24bSJens Axboe 1106b47ee6eSPavel Begunkov enum { 1116b47ee6eSPavel Begunkov IOSQE_FIXED_FILE_BIT, 1126b47ee6eSPavel Begunkov IOSQE_IO_DRAIN_BIT, 1136b47ee6eSPavel Begunkov IOSQE_IO_LINK_BIT, 1146b47ee6eSPavel Begunkov IOSQE_IO_HARDLINK_BIT, 1156b47ee6eSPavel Begunkov IOSQE_ASYNC_BIT, 116bcda7baaSJens Axboe IOSQE_BUFFER_SELECT_BIT, 11704c76b41SPavel Begunkov IOSQE_CQE_SKIP_SUCCESS_BIT, 1186b47ee6eSPavel Begunkov }; 1196b47ee6eSPavel Begunkov 120def596e9SJens Axboe /* 1216b06314cSJens Axboe * sqe->flags 1226b06314cSJens Axboe */ 1236b47ee6eSPavel Begunkov /* use fixed fileset */ 1246b47ee6eSPavel Begunkov #define IOSQE_FIXED_FILE (1U << IOSQE_FIXED_FILE_BIT) 1256b47ee6eSPavel Begunkov /* issue after inflight IO */ 1266b47ee6eSPavel Begunkov #define IOSQE_IO_DRAIN (1U << IOSQE_IO_DRAIN_BIT) 1276b47ee6eSPavel Begunkov /* links next sqe */ 1286b47ee6eSPavel Begunkov #define IOSQE_IO_LINK (1U << 
IOSQE_IO_LINK_BIT) 1296b47ee6eSPavel Begunkov /* like LINK, but stronger */ 1306b47ee6eSPavel Begunkov #define IOSQE_IO_HARDLINK (1U << IOSQE_IO_HARDLINK_BIT) 1316b47ee6eSPavel Begunkov /* always go async */ 1326b47ee6eSPavel Begunkov #define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT) 133bcda7baaSJens Axboe /* select buffer from sqe->buf_group */ 134bcda7baaSJens Axboe #define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT) 13504c76b41SPavel Begunkov /* don't post CQE if request succeeded */ 13604c76b41SPavel Begunkov #define IOSQE_CQE_SKIP_SUCCESS (1U << IOSQE_CQE_SKIP_SUCCESS_BIT) 1376b06314cSJens Axboe 1386b06314cSJens Axboe /* 139def596e9SJens Axboe * io_uring_setup() flags 140def596e9SJens Axboe */ 141def596e9SJens Axboe #define IORING_SETUP_IOPOLL (1U << 0) /* io_context is polled */ 1426c271ce2SJens Axboe #define IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */ 1436c271ce2SJens Axboe #define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */ 14433a107f0SJens Axboe #define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */ 1458110c1a6SJens Axboe #define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */ 14624369c2eSPavel Begunkov #define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */ 1477e84e1c7SStefano Garzarella #define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */ 148bcbb7bf6SJens Axboe #define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */ 149e1169f06SJens Axboe /* 150e1169f06SJens Axboe * Cooperative task running. When requests complete, they often require 151e1169f06SJens Axboe * forcing the submitter to transition to the kernel to complete. If this 152e1169f06SJens Axboe * flag is set, work will be done when the task transitions anyway, rather 153e1169f06SJens Axboe * than force an inter-processor interrupt reschedule. This avoids interrupting 154e1169f06SJens Axboe * a task running in userspace, and saves an IPI. 
155e1169f06SJens Axboe */ 156e1169f06SJens Axboe #define IORING_SETUP_COOP_TASKRUN (1U << 8) 157ef060ea9SJens Axboe /* 158ef060ea9SJens Axboe * If COOP_TASKRUN is set, get notified if task work is available for 159ef060ea9SJens Axboe * running and a kernel transition would be needed to run it. This sets 160ef060ea9SJens Axboe * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN. 161ef060ea9SJens Axboe */ 162ef060ea9SJens Axboe #define IORING_SETUP_TASKRUN_FLAG (1U << 9) 163ebdeb7c0SJens Axboe #define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */ 1647a51e5b4SStefan Roesch #define IORING_SETUP_CQE32 (1U << 11) /* CQEs are 32 byte */ 16597bbdc06SPavel Begunkov /* 16697bbdc06SPavel Begunkov * Only one task is allowed to submit requests 16797bbdc06SPavel Begunkov */ 16897bbdc06SPavel Begunkov #define IORING_SETUP_SINGLE_ISSUER (1U << 12) 169ebdeb7c0SJens Axboe 170c0e0d6baSDylan Yudaken /* 171c0e0d6baSDylan Yudaken * Defer running task work to get events. 172c0e0d6baSDylan Yudaken * Rather than running bits of task work whenever the task transitions 173c0e0d6baSDylan Yudaken * try to do it just before it is needed. 174c0e0d6baSDylan Yudaken */ 175c0e0d6baSDylan Yudaken #define IORING_SETUP_DEFER_TASKRUN (1U << 13) 176c0e0d6baSDylan Yudaken 17703d89a2dSJens Axboe /* 17803d89a2dSJens Axboe * Application provides the memory for the rings 17903d89a2dSJens Axboe */ 18003d89a2dSJens Axboe #define IORING_SETUP_NO_MMAP (1U << 14) 18103d89a2dSJens Axboe 1826e76ac59SJosh Triplett /* 1836e76ac59SJosh Triplett * Register the ring fd in itself for use with 1846e76ac59SJosh Triplett * IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather 1856e76ac59SJosh Triplett * than an fd. 1866e76ac59SJosh Triplett */ 1876e76ac59SJosh Triplett #define IORING_SETUP_REGISTERED_FD_ONLY (1U << 15) 1886e76ac59SJosh Triplett 1892af89abdSPavel Begunkov /* 1902af89abdSPavel Begunkov * Removes indirection through the SQ index array. 
1912af89abdSPavel Begunkov */ 1922af89abdSPavel Begunkov #define IORING_SETUP_NO_SQARRAY (1U << 16) 1932af89abdSPavel Begunkov 194cc51eaa8SDylan Yudaken enum io_uring_op { 1959e3aa61aSJens Axboe IORING_OP_NOP, 1969e3aa61aSJens Axboe IORING_OP_READV, 1979e3aa61aSJens Axboe IORING_OP_WRITEV, 1989e3aa61aSJens Axboe IORING_OP_FSYNC, 1999e3aa61aSJens Axboe IORING_OP_READ_FIXED, 2009e3aa61aSJens Axboe IORING_OP_WRITE_FIXED, 2019e3aa61aSJens Axboe IORING_OP_POLL_ADD, 2029e3aa61aSJens Axboe IORING_OP_POLL_REMOVE, 2039e3aa61aSJens Axboe IORING_OP_SYNC_FILE_RANGE, 2049e3aa61aSJens Axboe IORING_OP_SENDMSG, 2059e3aa61aSJens Axboe IORING_OP_RECVMSG, 2069e3aa61aSJens Axboe IORING_OP_TIMEOUT, 2079e3aa61aSJens Axboe IORING_OP_TIMEOUT_REMOVE, 2089e3aa61aSJens Axboe IORING_OP_ACCEPT, 2099e3aa61aSJens Axboe IORING_OP_ASYNC_CANCEL, 2109e3aa61aSJens Axboe IORING_OP_LINK_TIMEOUT, 2119e3aa61aSJens Axboe IORING_OP_CONNECT, 212d63d1b5eSJens Axboe IORING_OP_FALLOCATE, 21315b71abeSJens Axboe IORING_OP_OPENAT, 214b5dba59eSJens Axboe IORING_OP_CLOSE, 215d9808cebSPavel Begunkov IORING_OP_FILES_UPDATE, 216eddc7ef5SJens Axboe IORING_OP_STATX, 2173a6820f2SJens Axboe IORING_OP_READ, 2183a6820f2SJens Axboe IORING_OP_WRITE, 2194840e418SJens Axboe IORING_OP_FADVISE, 220c1ca757bSJens Axboe IORING_OP_MADVISE, 221fddafaceSJens Axboe IORING_OP_SEND, 222fddafaceSJens Axboe IORING_OP_RECV, 223cebdb986SJens Axboe IORING_OP_OPENAT2, 2243e4827b0SJens Axboe IORING_OP_EPOLL_CTL, 2257d67af2cSPavel Begunkov IORING_OP_SPLICE, 226ddf0322dSJens Axboe IORING_OP_PROVIDE_BUFFERS, 227067524e9SJens Axboe IORING_OP_REMOVE_BUFFERS, 228f2a8d5c7SPavel Begunkov IORING_OP_TEE, 22936f4fa68SJens Axboe IORING_OP_SHUTDOWN, 23080a261fdSJens Axboe IORING_OP_RENAMEAT, 23114a1143bSJens Axboe IORING_OP_UNLINKAT, 232e34a02dcSDmitry Kadashev IORING_OP_MKDIRAT, 2337a8721f8SDmitry Kadashev IORING_OP_SYMLINKAT, 234cf30da90SDmitry Kadashev IORING_OP_LINKAT, 2354f57f06cSJens Axboe IORING_OP_MSG_RING, 236e9621e2bSStefan Roesch 
IORING_OP_FSETXATTR, 237e9621e2bSStefan Roesch IORING_OP_SETXATTR, 238a56834e0SStefan Roesch IORING_OP_FGETXATTR, 239a56834e0SStefan Roesch IORING_OP_GETXATTR, 2401374e08eSJens Axboe IORING_OP_SOCKET, 241ee692a21SJens Axboe IORING_OP_URING_CMD, 242b48c312bSPavel Begunkov IORING_OP_SEND_ZC, 243493108d9SPavel Begunkov IORING_OP_SENDMSG_ZC, 244fc68fcdaSJens Axboe IORING_OP_READ_MULTISHOT, 245*f31ecf67SJens Axboe IORING_OP_WAITID, 2469e3aa61aSJens Axboe 2479e3aa61aSJens Axboe /* this goes last, obviously */ 2489e3aa61aSJens Axboe IORING_OP_LAST, 2499e3aa61aSJens Axboe }; 250c992fe29SChristoph Hellwig 251c992fe29SChristoph Hellwig /* 2529cda70f6SAnuj Gupta * sqe->uring_cmd_flags 2536dcabcd3SJens Axboe * IORING_URING_CMD_FIXED use registered buffer; pass this flag 2549cda70f6SAnuj Gupta * along with setting sqe->buf_index. 2559408d8a3SKeith Busch * IORING_URING_CMD_POLLED driver use only 2569cda70f6SAnuj Gupta */ 2579cda70f6SAnuj Gupta #define IORING_URING_CMD_FIXED (1U << 0) 2589408d8a3SKeith Busch #define IORING_URING_CMD_POLLED (1U << 31) 2599cda70f6SAnuj Gupta 2609cda70f6SAnuj Gupta 2619cda70f6SAnuj Gupta /* 262c992fe29SChristoph Hellwig * sqe->fsync_flags 263c992fe29SChristoph Hellwig */ 264c992fe29SChristoph Hellwig #define IORING_FSYNC_DATASYNC (1U << 0) 2652b188cc1SJens Axboe 2662b188cc1SJens Axboe /* 267a41525abSJens Axboe * sqe->timeout_flags 268a41525abSJens Axboe */ 269a41525abSJens Axboe #define IORING_TIMEOUT_ABS (1U << 0) 2709c8e11b3SPavel Begunkov #define IORING_TIMEOUT_UPDATE (1U << 1) 27150c1df2bSJens Axboe #define IORING_TIMEOUT_BOOTTIME (1U << 2) 27250c1df2bSJens Axboe #define IORING_TIMEOUT_REALTIME (1U << 3) 273f1042b6cSPavel Begunkov #define IORING_LINK_TIMEOUT_UPDATE (1U << 4) 2746224590dSPavel Begunkov #define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5) 275ea97f6c8SDavid Wei #define IORING_TIMEOUT_MULTISHOT (1U << 6) 27650c1df2bSJens Axboe #define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME) 277f1042b6cSPavel Begunkov 
#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE) 278a41525abSJens Axboe /* 2797d67af2cSPavel Begunkov * sqe->splice_flags 2807d67af2cSPavel Begunkov * extends splice(2) flags 2817d67af2cSPavel Begunkov */ 2827d67af2cSPavel Begunkov #define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */ 2837d67af2cSPavel Begunkov 2847d67af2cSPavel Begunkov /* 28588e41cf9SJens Axboe * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the 28688e41cf9SJens Axboe * command flags for POLL_ADD are stored in sqe->len. 28788e41cf9SJens Axboe * 28888e41cf9SJens Axboe * IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if 28988e41cf9SJens Axboe * the poll handler will continue to report 29088e41cf9SJens Axboe * CQEs on behalf of the same SQE. 291b69de288SJens Axboe * 292b69de288SJens Axboe * IORING_POLL_UPDATE Update existing poll request, matching 293b69de288SJens Axboe * sqe->addr as the old user_data field. 294b9ba8a44SJens Axboe * 295b9ba8a44SJens Axboe * IORING_POLL_LEVEL Level triggered poll. 29688e41cf9SJens Axboe */ 29788e41cf9SJens Axboe #define IORING_POLL_ADD_MULTI (1U << 0) 298b69de288SJens Axboe #define IORING_POLL_UPDATE_EVENTS (1U << 1) 299b69de288SJens Axboe #define IORING_POLL_UPDATE_USER_DATA (1U << 2) 300b9ba8a44SJens Axboe #define IORING_POLL_ADD_LEVEL (1U << 3) 30188e41cf9SJens Axboe 30288e41cf9SJens Axboe /* 3038e29da69SJens Axboe * ASYNC_CANCEL flags. 
3048e29da69SJens Axboe * 3058e29da69SJens Axboe * IORING_ASYNC_CANCEL_ALL Cancel all requests that match the given key 3064bf94615SJens Axboe * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the 3074bf94615SJens Axboe * request 'user_data' 308970f256eSJens Axboe * IORING_ASYNC_CANCEL_ANY Match any request 3097d8ca725SJens Axboe * IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor 3108165b566SJens Axboe * IORING_ASYNC_CANCEL_USERDATA Match on user_data, default for no other key 311d7b8b079SJens Axboe * IORING_ASYNC_CANCEL_OP Match request based on opcode 3128e29da69SJens Axboe */ 3138e29da69SJens Axboe #define IORING_ASYNC_CANCEL_ALL (1U << 0) 3144bf94615SJens Axboe #define IORING_ASYNC_CANCEL_FD (1U << 1) 315970f256eSJens Axboe #define IORING_ASYNC_CANCEL_ANY (1U << 2) 3167d8ca725SJens Axboe #define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3) 3178165b566SJens Axboe #define IORING_ASYNC_CANCEL_USERDATA (1U << 4) 318d7b8b079SJens Axboe #define IORING_ASYNC_CANCEL_OP (1U << 5) 3198e29da69SJens Axboe 3208e29da69SJens Axboe /* 32129c1ac23SPavel Begunkov * send/sendmsg and recv/recvmsg flags (sqe->ioprio) 3220455d4ccSJens Axboe * 3230455d4ccSJens Axboe * IORING_RECVSEND_POLL_FIRST If set, instead of first attempting to send 3240455d4ccSJens Axboe * or receive and arm poll if that yields an 3250455d4ccSJens Axboe * -EAGAIN result, arm poll upfront and skip 3260455d4ccSJens Axboe * the initial transfer attempt. 327b3fdea6eSDylan Yudaken * 328b3fdea6eSDylan Yudaken * IORING_RECV_MULTISHOT Multishot recv. Sets IORING_CQE_F_MORE if 329b3fdea6eSDylan Yudaken * the handler will continue to report 330b3fdea6eSDylan Yudaken * CQEs on behalf of the same SQE. 33110c7d33eSPavel Begunkov * 33210c7d33eSPavel Begunkov * IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in 33310c7d33eSPavel Begunkov * the buf_index field. 
334e307e669SStefan Metzmacher * 335e307e669SStefan Metzmacher * IORING_SEND_ZC_REPORT_USAGE 336e307e669SStefan Metzmacher * If set, SEND[MSG]_ZC should report 337e307e669SStefan Metzmacher * the zerocopy usage in cqe.res 338e307e669SStefan Metzmacher * for the IORING_CQE_F_NOTIF cqe. 339e307e669SStefan Metzmacher * 0 is reported if zerocopy was actually possible. 340e307e669SStefan Metzmacher * IORING_NOTIF_USAGE_ZC_COPIED if data was copied 341e307e669SStefan Metzmacher * (at least partially). 3420455d4ccSJens Axboe */ 3430455d4ccSJens Axboe #define IORING_RECVSEND_POLL_FIRST (1U << 0) 344b3fdea6eSDylan Yudaken #define IORING_RECV_MULTISHOT (1U << 1) 34510c7d33eSPavel Begunkov #define IORING_RECVSEND_FIXED_BUF (1U << 2) 346e307e669SStefan Metzmacher #define IORING_SEND_ZC_REPORT_USAGE (1U << 3) 347e307e669SStefan Metzmacher 348e307e669SStefan Metzmacher /* 349e307e669SStefan Metzmacher * cqe.res for IORING_CQE_F_NOTIF if 350e307e669SStefan Metzmacher * IORING_SEND_ZC_REPORT_USAGE was requested 351e307e669SStefan Metzmacher * 352e307e669SStefan Metzmacher * It should be treated as a flag, all other 353e307e669SStefan Metzmacher * bits of cqe.res should be treated as reserved! 
354e307e669SStefan Metzmacher */ 355e307e669SStefan Metzmacher #define IORING_NOTIF_USAGE_ZC_COPIED (1U << 31) 3560455d4ccSJens Axboe 3570455d4ccSJens Axboe /* 358390ed29bSHao Xu * accept flags stored in sqe->ioprio 359390ed29bSHao Xu */ 360390ed29bSHao Xu #define IORING_ACCEPT_MULTISHOT (1U << 0) 361390ed29bSHao Xu 362390ed29bSHao Xu /* 363e6130ebaSJens Axboe * IORING_OP_MSG_RING command types, stored in sqe->addr 364e6130ebaSJens Axboe */ 365e6130ebaSJens Axboe enum { 366e6130ebaSJens Axboe IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */ 367e6130ebaSJens Axboe IORING_MSG_SEND_FD, /* send a registered fd to another ring */ 368e6130ebaSJens Axboe }; 369e6130ebaSJens Axboe 370e6130ebaSJens Axboe /* 371e6130ebaSJens Axboe * IORING_OP_MSG_RING flags (sqe->msg_ring_flags) 372e6130ebaSJens Axboe * 373e6130ebaSJens Axboe * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not 374e6130ebaSJens Axboe * applicable for IORING_MSG_DATA, obviously. 375e6130ebaSJens Axboe */ 376e6130ebaSJens Axboe #define IORING_MSG_RING_CQE_SKIP (1U << 0) 377cbeb47a7SBreno Leitao /* Pass through the flags from sqe->file_index to cqe->flags */ 378cbeb47a7SBreno Leitao #define IORING_MSG_RING_FLAGS_PASS (1U << 1) 379e6130ebaSJens Axboe 380e6130ebaSJens Axboe /* 3812b188cc1SJens Axboe * IO completion data structure (Completion Queue Entry) 3822b188cc1SJens Axboe */ 3832b188cc1SJens Axboe struct io_uring_cqe { 3842b188cc1SJens Axboe __u64 user_data; /* sqe->data submission passed back */ 3852b188cc1SJens Axboe __s32 res; /* result code for this event */ 3862b188cc1SJens Axboe __u32 flags; 3877a51e5b4SStefan Roesch 3887a51e5b4SStefan Roesch /* 3897a51e5b4SStefan Roesch * If the ring is initialized with IORING_SETUP_CQE32, then this field 3907a51e5b4SStefan Roesch * contains 16-bytes of padding, doubling the size of the CQE. 
3917a51e5b4SStefan Roesch */ 3927a51e5b4SStefan Roesch __u64 big_cqe[]; 3932b188cc1SJens Axboe }; 3942b188cc1SJens Axboe 3952b188cc1SJens Axboe /* 396bcda7baaSJens Axboe * cqe->flags 397bcda7baaSJens Axboe * 398bcda7baaSJens Axboe * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID 39988e41cf9SJens Axboe * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries 400f548a12eSJens Axboe * IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv 401b48c312bSPavel Begunkov * IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinct 402b48c312bSPavel Begunkov * them from sends. 403bcda7baaSJens Axboe */ 404bcda7baaSJens Axboe #define IORING_CQE_F_BUFFER (1U << 0) 40588e41cf9SJens Axboe #define IORING_CQE_F_MORE (1U << 1) 406f548a12eSJens Axboe #define IORING_CQE_F_SOCK_NONEMPTY (1U << 2) 407b48c312bSPavel Begunkov #define IORING_CQE_F_NOTIF (1U << 3) 408bcda7baaSJens Axboe 409bcda7baaSJens Axboe enum { 410bcda7baaSJens Axboe IORING_CQE_BUFFER_SHIFT = 16, 411bcda7baaSJens Axboe }; 412bcda7baaSJens Axboe 413bcda7baaSJens Axboe /* 4142b188cc1SJens Axboe * Magic offsets for the application to mmap the data it needs 4152b188cc1SJens Axboe */ 4162b188cc1SJens Axboe #define IORING_OFF_SQ_RING 0ULL 4172b188cc1SJens Axboe #define IORING_OFF_CQ_RING 0x8000000ULL 4182b188cc1SJens Axboe #define IORING_OFF_SQES 0x10000000ULL 419c56e022cSJens Axboe #define IORING_OFF_PBUF_RING 0x80000000ULL 420c56e022cSJens Axboe #define IORING_OFF_PBUF_SHIFT 16 421c56e022cSJens Axboe #define IORING_OFF_MMAP_MASK 0xf8000000ULL 4222b188cc1SJens Axboe 4232b188cc1SJens Axboe /* 4242b188cc1SJens Axboe * Filled with the offset for mmap(2) 4252b188cc1SJens Axboe */ 4262b188cc1SJens Axboe struct io_sqring_offsets { 4272b188cc1SJens Axboe __u32 head; 4282b188cc1SJens Axboe __u32 tail; 4292b188cc1SJens Axboe __u32 ring_mask; 4302b188cc1SJens Axboe __u32 ring_entries; 4312b188cc1SJens Axboe __u32 flags; 4322b188cc1SJens Axboe __u32 dropped; 4332b188cc1SJens 
Axboe __u32 array; 4342b188cc1SJens Axboe __u32 resv1; 43503d89a2dSJens Axboe __u64 user_addr; 4362b188cc1SJens Axboe }; 4372b188cc1SJens Axboe 4386c271ce2SJens Axboe /* 4396c271ce2SJens Axboe * sq_ring->flags 4406c271ce2SJens Axboe */ 4416c271ce2SJens Axboe #define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */ 4426d5f9049SXiaoguang Wang #define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */ 443ef060ea9SJens Axboe #define IORING_SQ_TASKRUN (1U << 2) /* task should enter the kernel */ 4446c271ce2SJens Axboe 4452b188cc1SJens Axboe struct io_cqring_offsets { 4462b188cc1SJens Axboe __u32 head; 4472b188cc1SJens Axboe __u32 tail; 4482b188cc1SJens Axboe __u32 ring_mask; 4492b188cc1SJens Axboe __u32 ring_entries; 4502b188cc1SJens Axboe __u32 overflow; 4512b188cc1SJens Axboe __u32 cqes; 4520d9b5b3aSStefano Garzarella __u32 flags; 4530d9b5b3aSStefano Garzarella __u32 resv1; 45403d89a2dSJens Axboe __u64 user_addr; 4552b188cc1SJens Axboe }; 4562b188cc1SJens Axboe 4572b188cc1SJens Axboe /* 4587e55a19cSStefano Garzarella * cq_ring->flags 4597e55a19cSStefano Garzarella */ 4607e55a19cSStefano Garzarella 4617e55a19cSStefano Garzarella /* disable eventfd notifications */ 4627e55a19cSStefano Garzarella #define IORING_CQ_EVENTFD_DISABLED (1U << 0) 4637e55a19cSStefano Garzarella 4647e55a19cSStefano Garzarella /* 4652b188cc1SJens Axboe * io_uring_enter(2) flags 4662b188cc1SJens Axboe */ 4672b188cc1SJens Axboe #define IORING_ENTER_GETEVENTS (1U << 0) 4686c271ce2SJens Axboe #define IORING_ENTER_SQ_WAKEUP (1U << 1) 46990554200SJens Axboe #define IORING_ENTER_SQ_WAIT (1U << 2) 470c73ebb68SHao Xu #define IORING_ENTER_EXT_ARG (1U << 3) 471e7a6c00dSJens Axboe #define IORING_ENTER_REGISTERED_RING (1U << 4) 4722b188cc1SJens Axboe 4732b188cc1SJens Axboe /* 4742b188cc1SJens Axboe * Passed in for io_uring_setup(2). 
Copied back with updated info on success 4752b188cc1SJens Axboe */ 4762b188cc1SJens Axboe struct io_uring_params { 4772b188cc1SJens Axboe __u32 sq_entries; 4782b188cc1SJens Axboe __u32 cq_entries; 4792b188cc1SJens Axboe __u32 flags; 4806c271ce2SJens Axboe __u32 sq_thread_cpu; 4816c271ce2SJens Axboe __u32 sq_thread_idle; 482ac90f249SJens Axboe __u32 features; 48324369c2eSPavel Begunkov __u32 wq_fd; 48424369c2eSPavel Begunkov __u32 resv[3]; 4852b188cc1SJens Axboe struct io_sqring_offsets sq_off; 4862b188cc1SJens Axboe struct io_cqring_offsets cq_off; 4872b188cc1SJens Axboe }; 4882b188cc1SJens Axboe 489edafcceeSJens Axboe /* 490ac90f249SJens Axboe * io_uring_params->features flags 491ac90f249SJens Axboe */ 492ac90f249SJens Axboe #define IORING_FEAT_SINGLE_MMAP (1U << 0) 4931d7bb1d5SJens Axboe #define IORING_FEAT_NODROP (1U << 1) 494da8c9690SJens Axboe #define IORING_FEAT_SUBMIT_STABLE (1U << 2) 495ba04291eSJens Axboe #define IORING_FEAT_RW_CUR_POS (1U << 3) 496cccf0ee8SJens Axboe #define IORING_FEAT_CUR_PERSONALITY (1U << 4) 497d7718a9dSJens Axboe #define IORING_FEAT_FAST_POLL (1U << 5) 4985769a351SJiufei Xue #define IORING_FEAT_POLL_32BITS (1U << 6) 49928cea78aSJens Axboe #define IORING_FEAT_SQPOLL_NONFIXED (1U << 7) 500c73ebb68SHao Xu #define IORING_FEAT_EXT_ARG (1U << 8) 5011c0aa1faSJens Axboe #define IORING_FEAT_NATIVE_WORKERS (1U << 9) 5029690557eSPavel Begunkov #define IORING_FEAT_RSRC_TAGS (1U << 10) 50304c76b41SPavel Begunkov #define IORING_FEAT_CQE_SKIP (1U << 11) 504c4212f3eSJens Axboe #define IORING_FEAT_LINKED_FILE (1U << 12) 5057d3fd88dSJosh Triplett #define IORING_FEAT_REG_REG_RING (1U << 13) 506ac90f249SJens Axboe 507ac90f249SJens Axboe /* 508edafcceeSJens Axboe * io_uring_register(2) opcodes and arguments 509edafcceeSJens Axboe */ 5109d4a75efSStefano Garzarella enum { 5119d4a75efSStefano Garzarella IORING_REGISTER_BUFFERS = 0, 5129d4a75efSStefano Garzarella IORING_UNREGISTER_BUFFERS = 1, 5139d4a75efSStefano Garzarella IORING_REGISTER_FILES = 2, 
5149d4a75efSStefano Garzarella IORING_UNREGISTER_FILES = 3, 5159d4a75efSStefano Garzarella IORING_REGISTER_EVENTFD = 4, 5169d4a75efSStefano Garzarella IORING_UNREGISTER_EVENTFD = 5, 5179d4a75efSStefano Garzarella IORING_REGISTER_FILES_UPDATE = 6, 5189d4a75efSStefano Garzarella IORING_REGISTER_EVENTFD_ASYNC = 7, 5199d4a75efSStefano Garzarella IORING_REGISTER_PROBE = 8, 5209d4a75efSStefano Garzarella IORING_REGISTER_PERSONALITY = 9, 5219d4a75efSStefano Garzarella IORING_UNREGISTER_PERSONALITY = 10, 52221b55dbcSStefano Garzarella IORING_REGISTER_RESTRICTIONS = 11, 5237e84e1c7SStefano Garzarella IORING_REGISTER_ENABLE_RINGS = 12, 524992da01aSPavel Begunkov 525992da01aSPavel Begunkov /* extended with tagging */ 526992da01aSPavel Begunkov IORING_REGISTER_FILES2 = 13, 527992da01aSPavel Begunkov IORING_REGISTER_FILES_UPDATE2 = 14, 528992da01aSPavel Begunkov IORING_REGISTER_BUFFERS2 = 15, 529992da01aSPavel Begunkov IORING_REGISTER_BUFFERS_UPDATE = 16, 5309d4a75efSStefano Garzarella 531fe76421dSJens Axboe /* set/clear io-wq thread affinities */ 532fe76421dSJens Axboe IORING_REGISTER_IOWQ_AFF = 17, 533fe76421dSJens Axboe IORING_UNREGISTER_IOWQ_AFF = 18, 534fe76421dSJens Axboe 535dd47c104SEugene Syromiatnikov /* set/get max number of io-wq workers */ 5362e480058SJens Axboe IORING_REGISTER_IOWQ_MAX_WORKERS = 19, 5372e480058SJens Axboe 538e7a6c00dSJens Axboe /* register/unregister io_uring fd with the ring */ 539e7a6c00dSJens Axboe IORING_REGISTER_RING_FDS = 20, 540e7a6c00dSJens Axboe IORING_UNREGISTER_RING_FDS = 21, 541e7a6c00dSJens Axboe 542c7fb1942SJens Axboe /* register ring based provide buffer group */ 543c7fb1942SJens Axboe IORING_REGISTER_PBUF_RING = 22, 544c7fb1942SJens Axboe IORING_UNREGISTER_PBUF_RING = 23, 545c7fb1942SJens Axboe 54678a861b9SJens Axboe /* sync cancelation API */ 54778a861b9SJens Axboe IORING_REGISTER_SYNC_CANCEL = 24, 54878a861b9SJens Axboe 5496e73dffbSPavel Begunkov /* register a range of fixed file slots for automatic slot allocation */ 
5506e73dffbSPavel Begunkov IORING_REGISTER_FILE_ALLOC_RANGE = 25, 5516e73dffbSPavel Begunkov 5529d4a75efSStefano Garzarella /* this goes last */ 5537d3fd88dSJosh Triplett IORING_REGISTER_LAST, 5547d3fd88dSJosh Triplett 5557d3fd88dSJosh Triplett /* flag added to the opcode to use a registered ring fd */ 5567d3fd88dSJosh Triplett IORING_REGISTER_USE_REGISTERED_RING = 1U << 31 5579d4a75efSStefano Garzarella }; 558c3a31e60SJens Axboe 559dd47c104SEugene Syromiatnikov /* io-wq worker categories */ 560dd47c104SEugene Syromiatnikov enum { 561dd47c104SEugene Syromiatnikov IO_WQ_BOUND, 562dd47c104SEugene Syromiatnikov IO_WQ_UNBOUND, 563dd47c104SEugene Syromiatnikov }; 564dd47c104SEugene Syromiatnikov 565269bbe5fSBijan Mottahedeh /* deprecated, see struct io_uring_rsrc_update */ 566c3a31e60SJens Axboe struct io_uring_files_update { 567c3a31e60SJens Axboe __u32 offset; 5681292e972SEugene Syromiatnikov __u32 resv; 5691292e972SEugene Syromiatnikov __aligned_u64 /* __s32 * */ fds; 570c3a31e60SJens Axboe }; 571edafcceeSJens Axboe 572a8da73a3SJens Axboe /* 573a8da73a3SJens Axboe * Register a fully sparse file space, rather than pass in an array of all 574a8da73a3SJens Axboe * -1 file descriptors. 
 */
#define IORING_RSRC_REGISTER_SPARSE	(1U << 0)

/*
 * Describes a table of 'nr' resources to register. 'data' and 'tags' are
 * user addresses of arrays; the exact element layout depends on the
 * register opcode used (not visible in this chunk).
 */
struct io_uring_rsrc_register {
	__u32 nr;		/* number of entries being registered */
	__u32 flags;		/* IORING_RSRC_REGISTER_* flags */
	__u64 resv2;		/* reserved */
	__aligned_u64 data;	/* user address of the resource array */
	__aligned_u64 tags;	/* user address of the per-resource tag array */
};

/* Single-entry update at 'offset' into a previously registered table */
struct io_uring_rsrc_update {
	__u32 offset;
	__u32 resv;		/* reserved */
	__aligned_u64 data;
};

/*
 * Extended resource update: 'nr' entries starting at 'offset', with a
 * parallel array of tags.
 */
struct io_uring_rsrc_update2 {
	__u32 offset;
	__u32 resv;		/* reserved */
	__aligned_u64 data;
	__aligned_u64 tags;
	__u32 nr;
	__u32 resv2;		/* reserved */
};

/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP	(-2)

/* io_uring_probe_op->flags bit: the opcode is supported by this kernel */
#define IO_URING_OP_SUPPORTED	(1U << 0)

/* One per-opcode entry in a probe result */
struct io_uring_probe_op {
	__u8 op;		/* opcode this entry describes */
	__u8 resv;		/* reserved */
	__u16 flags;		/* IO_URING_OP_* flags */
	__u32 resv2;		/* reserved */
};

/* Probe result: fixed header followed by 'ops_len' opcode entries */
struct io_uring_probe {
	__u8 last_op;		/* last opcode supported */
	__u8 ops_len;		/* length of ops[] array below */
	__u16 resv;		/* reserved */
	__u32 resv2[3];		/* reserved */
	struct io_uring_probe_op ops[];	/* flexible array, ops_len entries */
};

/*
 * One restriction rule. 'opcode' selects which union member is meaningful;
 * see the IORING_RESTRICTION_* opcode values further below.
 */
struct io_uring_restriction {
	__u16 opcode;
	union {
		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
		__u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
		__u8 sqe_flags;   /* IORING_RESTRICTION_SQE_FLAGS_* */
	};
	__u8 resv;		/* reserved */
	__u32 resv2[3];		/* reserved */
};

/* One entry in a provided-buffer ring */
struct io_uring_buf {
	__u64	addr;		/* user address of the buffer */
	__u32	len;		/* buffer length */
	__u16	bid;		/* buffer ID */
	__u16	resv;		/* reserved; overlaid with the ring tail (below) */
};

/*
 * Shared provided-buffer ring. The header (with the ring 'tail') occupies
 * the same bytes as the first io_uring_buf slot, so the ring needs no
 * extra space beyond its entries.
 */
struct io_uring_buf_ring {
	union {
		/*
		 * To avoid spilling into more pages than we need to, the
		 * ring tail is overlaid with the io_uring_buf->resv field.
		 */
		struct {
			__u64	resv1;
			__u32	resv2;
			__u16	resv3;
			__u16	tail;
		};
		__DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
	};
};

/*
 * Flags for IORING_REGISTER_PBUF_RING.
 *
 * IOU_PBUF_RING_MMAP:	If set, kernel will allocate the memory for the ring.
 *			The application must not set a ring_addr in struct
 *			io_uring_buf_reg, instead it must subsequently call
 *			mmap(2) with the offset set as:
 *			IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
 *			to get a virtual mapping for the ring.
 */
enum {
	IOU_PBUF_RING_MMAP	= 1,
};

/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
	__u64	ring_addr;	/* user address of the ring; must be unset
				 * when IOU_PBUF_RING_MMAP is used (see above) */
	__u32	ring_entries;	/* number of io_uring_buf entries */
	__u16	bgid;		/* buffer group ID */
	__u16	flags;		/* IOU_PBUF_RING_* flags */
	__u64	resv[3];	/* reserved */
};

/*
 * io_uring_restriction->opcode values
 */
enum {
	/* Allow an io_uring_register(2) opcode */
	IORING_RESTRICTION_REGISTER_OP		= 0,

	/* Allow an sqe opcode */
	IORING_RESTRICTION_SQE_OP		= 1,

	/* Allow sqe flags */
	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,

	/* Require sqe flags (these flags must be set on each submission) */
	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,

	IORING_RESTRICTION_LAST
};

/*
 * Extended wait arguments. NOTE(review): field names suggest this is the
 * io_uring_enter(2) extended-argument payload (signal mask + timeout);
 * the consuming syscall is not visible in this chunk — confirm against
 * the io_uring_enter() documentation.
 */
struct io_uring_getevents_arg {
	__u64	sigmask;	/* user address of a signal mask */
	__u32	sigmask_sz;	/* size of the signal mask, in bytes presumably */
	__u32	pad;		/* padding, reserved */
	__u64	ts;		/* user address of a timeout (__kernel_timespec) */
};

/*
 * Argument for IORING_REGISTER_SYNC_CANCEL
 */
struct io_uring_sync_cancel_reg {
	__u64	addr;		/* cancelation key (meaning depends on flags) */
	__s32	fd;		/* fd to match, when canceling by fd */
	__u32	flags;		/* cancelation flags */
	struct __kernel_timespec	timeout;	/* from linux/time_types.h */
	__u8	opcode;		/* opcode to match, when canceling by opcode */
	__u8	pad[7];		/* padding, reserved */
	__u64	pad2[3];	/* padding, reserved */
};

/*
 * Argument for IORING_REGISTER_FILE_ALLOC_RANGE
 * The range is specified as [off, off + len)
 */
struct io_uring_file_index_range {
	__u32	off;
	__u32	len;
	__u64	resv;		/* reserved */
};

/*
 * Metadata header for a received message: lengths of the name, control
 * and payload portions, plus message flags. NOTE(review): the producing
 * operation (presumably IORING_OP_RECVMSG with multishot/buffer select)
 * is not visible in this chunk.
 */
struct io_uring_recvmsg_out {
	__u32 namelen;
	__u32 controllen;
	__u32 payloadlen;
	__u32 flags;
};

/*
 * Argument for IORING_OP_URING_CMD when file is a socket
 */
enum {
	SOCKET_URING_OP_SIOCINQ	= 0,	/* mirrors the SIOCINQ ioctl */
	SOCKET_URING_OP_SIOCOUTQ,	/* mirrors the SIOCOUTQ ioctl */
};

#ifdef __cplusplus
}
#endif

#endif /* LINUX_IO_URING_H */