/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
 * Header file for the io_uring interface.
 *
 * Copyright (C) 2019 Jens Axboe
 * Copyright (C) 2019 Christoph Hellwig
 */
#ifndef LINUX_IO_URING_H
#define LINUX_IO_URING_H

#include <linux/fs.h>
#include <linux/types.h>
/*
 * This file is shared with liburing, which has to autodetect whether
 * linux/time_types.h is available. If it is not, liburing can define
 * UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H to skip the include.
 */
#ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
#include <linux/time_types.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * IO submission data structure (Submission Queue Entry)
 */
struct io_uring_sqe {
	__u8	opcode;		/* type of operation for this sqe */
	__u8	flags;		/* IOSQE_ flags */
	__u16	ioprio;		/* ioprio for the request */
	__s32	fd;		/* file descriptor to do IO on */
	union {
		__u64	off;	/* offset into file */
		__u64	addr2;
		struct {
			__u32	cmd_op;
			__u32	__pad1;
		};
	};
	union {
		__u64	addr;	/* pointer to buffer or iovecs */
		__u64	splice_off_in;
		struct {
			__u32	level;
			__u32	optname;
		};
	};
	__u32	len;		/* buffer size or number of iovecs */
	union {
		__kernel_rwf_t	rw_flags;
		__u32		fsync_flags;
		__u16		poll_events;	/* compatibility */
		__u32		poll32_events;	/* word-reversed for BE */
		__u32		sync_range_flags;
		__u32		msg_flags;
		__u32		timeout_flags;
		__u32		accept_flags;
		__u32		cancel_flags;
		__u32		open_flags;
		__u32		statx_flags;
		__u32		fadvise_advice;
		__u32		splice_flags;
		__u32		rename_flags;
		__u32		unlink_flags;
		__u32		hardlink_flags;
		__u32		xattr_flags;
		__u32		msg_ring_flags;
		__u32		uring_cmd_flags;
		__u32		waitid_flags;
		__u32		futex_flags;
		__u32		install_fd_flags;
		__u32		nop_flags;
	};
	__u64	user_data;	/* data to be passed back at completion time */
	/* pack this to avoid bogus arm OABI complaints */
	union {
		/* index into fixed buffers, if used */
		__u16	buf_index;
		/* for grouped buffer selection */
		__u16	buf_group;
	} __attribute__((packed));
	/* personality to use, if used */
	__u16	personality;
	union {
		__s32	splice_fd_in;
		__u32	file_index;
		__u32	optlen;
		struct {
			__u16	addr_len;
			__u16	__pad3[1];
		};
	};
	union {
		struct {
			__u64	addr3;
			__u64	__pad2[1];
		};
		__u64	optval;
		/*
		 * If the ring is initialized with IORING_SETUP_SQE128, then
		 * this field is used for 80 bytes of arbitrary command data
		 */
		__u8	cmd[0];
	};
};

/*
 * If sqe->file_index is set to this for opcodes that instantiate a new
 * direct descriptor (like openat/openat2/accept), then io_uring will allocate
 * an available direct descriptor instead of having the application pass one
 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
 * if the space is full.
 */
#define IORING_FILE_INDEX_ALLOC		(~0U)
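/*
 * Editor's note, an illustrative sketch rather than part of the UAPI: one
 * way an application might fill an SQE for a plain read. The helper name
 * example_prep_read is hypothetical, memset() assumes <string.h>, and the
 * SQE slot itself would come from the mmap'ed SQ ring. Only the fields
 * IORING_OP_READ consumes are set; everything else stays zero.
 *
 *	static void example_prep_read(struct io_uring_sqe *sqe, int fd,
 *				      void *buf, unsigned int nbytes,
 *				      __u64 offset, __u64 user_data)
 *	{
 *		memset(sqe, 0, sizeof(*sqe));
 *		sqe->opcode    = IORING_OP_READ;
 *		sqe->fd        = fd;
 *		sqe->addr      = (__u64)(unsigned long)buf;
 *		sqe->len       = nbytes;
 *		sqe->off       = offset;
 *		sqe->user_data = user_data;
 *	}
 */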
enum io_uring_sqe_flags_bit {
	IOSQE_FIXED_FILE_BIT,
	IOSQE_IO_DRAIN_BIT,
	IOSQE_IO_LINK_BIT,
	IOSQE_IO_HARDLINK_BIT,
	IOSQE_ASYNC_BIT,
	IOSQE_BUFFER_SELECT_BIT,
	IOSQE_CQE_SKIP_SUCCESS_BIT,
};

/*
 * sqe->flags
 */
/* use fixed fileset */
#define IOSQE_FIXED_FILE	(1U << IOSQE_FIXED_FILE_BIT)
/* issue after inflight IO */
#define IOSQE_IO_DRAIN		(1U << IOSQE_IO_DRAIN_BIT)
/* links next sqe */
#define IOSQE_IO_LINK		(1U << IOSQE_IO_LINK_BIT)
/* like LINK, but stronger */
#define IOSQE_IO_HARDLINK	(1U << IOSQE_IO_HARDLINK_BIT)
/* always go async */
#define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT	(1U << IOSQE_BUFFER_SELECT_BIT)
/* don't post CQE if request succeeded */
#define IOSQE_CQE_SKIP_SUCCESS	(1U << IOSQE_CQE_SKIP_SUCCESS_BIT)

/*
 * io_uring_setup() flags
 */
#define IORING_SETUP_IOPOLL	(1U << 0)	/* io_context is polled */
#define IORING_SETUP_SQPOLL	(1U << 1)	/* SQ poll thread */
#define IORING_SETUP_SQ_AFF	(1U << 2)	/* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
#define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
#define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */
#define IORING_SETUP_SUBMIT_ALL	(1U << 7)	/* continue submit on error */
/*
 * Cooperative task running. When requests complete, they often require
 * forcing the submitter to transition to the kernel to complete. If this
 * flag is set, work will be done when the task transitions anyway, rather
 * than force an inter-processor interrupt reschedule. This avoids interrupting
 * a task running in userspace, and saves an IPI.
 */
#define IORING_SETUP_COOP_TASKRUN	(1U << 8)
/*
 * If COOP_TASKRUN is set, get notified if task work is available for
 * running and a kernel transition would be needed to run it. This sets
 * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
 */
#define IORING_SETUP_TASKRUN_FLAG	(1U << 9)
#define IORING_SETUP_SQE128		(1U << 10) /* SQEs are 128 byte */
#define IORING_SETUP_CQE32		(1U << 11) /* CQEs are 32 byte */
/*
 * Only one task is allowed to submit requests
 */
#define IORING_SETUP_SINGLE_ISSUER	(1U << 12)

/*
 * Defer running task work to get events.
 * Rather than running bits of task work whenever the task transitions
 * try to do it just before it is needed.
 */
#define IORING_SETUP_DEFER_TASKRUN	(1U << 13)

/*
 * Application provides the memory for the rings
 */
#define IORING_SETUP_NO_MMAP		(1U << 14)

/*
 * Register the ring fd in itself for use with
 * IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather
 * than an fd.
 */
#define IORING_SETUP_REGISTERED_FD_ONLY	(1U << 15)

/*
 * Removes indirection through the SQ index array.
 */
#define IORING_SETUP_NO_SQARRAY		(1U << 16)

/* Use hybrid poll in iopoll process */
#define IORING_SETUP_HYBRID_IOPOLL	(1U << 17)
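/*
 * Editor's note, an illustrative sketch rather than part of the UAPI:
 * creating a ring with a couple of the setup flags above via the raw
 * syscall. ring_fd and the 256 entry count are arbitrary; syscall(2) with
 * __NR_io_uring_setup assumes <unistd.h> and <sys/syscall.h>, perror()
 * assumes <stdio.h>. liburing's io_uring_queue_init_params() wraps the
 * same operation.
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd;
 *
 *	p.flags = IORING_SETUP_CLAMP | IORING_SETUP_COOP_TASKRUN;
 *	ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *	if (ring_fd < 0)
 *		perror("io_uring_setup");
 */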
enum io_uring_op {
	IORING_OP_NOP,
	IORING_OP_READV,
	IORING_OP_WRITEV,
	IORING_OP_FSYNC,
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED,
	IORING_OP_POLL_ADD,
	IORING_OP_POLL_REMOVE,
	IORING_OP_SYNC_FILE_RANGE,
	IORING_OP_SENDMSG,
	IORING_OP_RECVMSG,
	IORING_OP_TIMEOUT,
	IORING_OP_TIMEOUT_REMOVE,
	IORING_OP_ACCEPT,
	IORING_OP_ASYNC_CANCEL,
	IORING_OP_LINK_TIMEOUT,
	IORING_OP_CONNECT,
	IORING_OP_FALLOCATE,
	IORING_OP_OPENAT,
	IORING_OP_CLOSE,
	IORING_OP_FILES_UPDATE,
	IORING_OP_STATX,
	IORING_OP_READ,
	IORING_OP_WRITE,
	IORING_OP_FADVISE,
	IORING_OP_MADVISE,
	IORING_OP_SEND,
	IORING_OP_RECV,
	IORING_OP_OPENAT2,
	IORING_OP_EPOLL_CTL,
	IORING_OP_SPLICE,
	IORING_OP_PROVIDE_BUFFERS,
	IORING_OP_REMOVE_BUFFERS,
	IORING_OP_TEE,
	IORING_OP_SHUTDOWN,
	IORING_OP_RENAMEAT,
	IORING_OP_UNLINKAT,
	IORING_OP_MKDIRAT,
	IORING_OP_SYMLINKAT,
	IORING_OP_LINKAT,
	IORING_OP_MSG_RING,
	IORING_OP_FSETXATTR,
	IORING_OP_SETXATTR,
	IORING_OP_FGETXATTR,
	IORING_OP_GETXATTR,
	IORING_OP_SOCKET,
	IORING_OP_URING_CMD,
	IORING_OP_SEND_ZC,
	IORING_OP_SENDMSG_ZC,
	IORING_OP_READ_MULTISHOT,
	IORING_OP_WAITID,
	IORING_OP_FUTEX_WAIT,
	IORING_OP_FUTEX_WAKE,
	IORING_OP_FUTEX_WAITV,
	IORING_OP_FIXED_FD_INSTALL,
	IORING_OP_FTRUNCATE,
	IORING_OP_BIND,
	IORING_OP_LISTEN,

	/* this goes last, obviously */
	IORING_OP_LAST,
};

/*
 * sqe->uring_cmd_flags		top 8bits aren't available for userspace
 * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
 *				along with setting sqe->buf_index.
 */
#define IORING_URING_CMD_FIXED	(1U << 0)
#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED


/*
 * sqe->fsync_flags
 */
#define IORING_FSYNC_DATASYNC	(1U << 0)

/*
 * sqe->timeout_flags
 */
#define IORING_TIMEOUT_ABS		(1U << 0)
#define IORING_TIMEOUT_UPDATE		(1U << 1)
#define IORING_TIMEOUT_BOOTTIME		(1U << 2)
#define IORING_TIMEOUT_REALTIME		(1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE	(1U << 4)
#define IORING_TIMEOUT_ETIME_SUCCESS	(1U << 5)
#define IORING_TIMEOUT_MULTISHOT	(1U << 6)
#define IORING_TIMEOUT_CLOCK_MASK	(IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK	(IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
 * sqe->splice_flags
 * extends splice(2) flags
 */
#define SPLICE_F_FD_IN_FIXED	(1U << 31) /* the last bit of __u32 */

/*
 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
 * command flags for POLL_ADD are stored in sqe->len.
 *
 * IORING_POLL_ADD_MULTI	Multishot poll. Sets IORING_CQE_F_MORE if
 *				the poll handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_POLL_UPDATE		Update existing poll request, matching
 *				sqe->addr as the old user_data field.
 *
 * IORING_POLL_LEVEL		Level triggered poll.
 */
#define IORING_POLL_ADD_MULTI		(1U << 0)
#define IORING_POLL_UPDATE_EVENTS	(1U << 1)
#define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
#define IORING_POLL_ADD_LEVEL		(1U << 3)
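/*
 * Editor's note, an illustrative sketch rather than part of the UAPI:
 * arming a multishot poll request. As noted above, the poll mask lives in
 * sqe->poll32_events while the POLL_ADD command flags go in sqe->len. The
 * helper name is hypothetical, poll_mask would typically be POLLIN and
 * friends from <poll.h>, memset() assumes <string.h>, and big-endian
 * systems additionally need the word-swap mentioned at the sqe definition.
 *
 *	static void example_prep_poll_multishot(struct io_uring_sqe *sqe,
 *						int fd, __u32 poll_mask,
 *						__u64 user_data)
 *	{
 *		memset(sqe, 0, sizeof(*sqe));
 *		sqe->opcode        = IORING_OP_POLL_ADD;
 *		sqe->fd            = fd;
 *		sqe->poll32_events = poll_mask;
 *		sqe->len           = IORING_POLL_ADD_MULTI;
 *		sqe->user_data     = user_data;
 *	}
 */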
/*
 * ASYNC_CANCEL flags.
 *
 * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
 * IORING_ASYNC_CANCEL_FD	Key off 'fd' for cancelation rather than the
 *				request 'user_data'
 * IORING_ASYNC_CANCEL_ANY	Match any request
 * IORING_ASYNC_CANCEL_FD_FIXED	'fd' passed in is a fixed descriptor
 * IORING_ASYNC_CANCEL_USERDATA	Match on user_data, default for no other key
 * IORING_ASYNC_CANCEL_OP	Match request based on opcode
 */
#define IORING_ASYNC_CANCEL_ALL		(1U << 0)
#define IORING_ASYNC_CANCEL_FD		(1U << 1)
#define IORING_ASYNC_CANCEL_ANY		(1U << 2)
#define IORING_ASYNC_CANCEL_FD_FIXED	(1U << 3)
#define IORING_ASYNC_CANCEL_USERDATA	(1U << 4)
#define IORING_ASYNC_CANCEL_OP		(1U << 5)

/*
 * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
 *
 * IORING_RECVSEND_POLL_FIRST	If set, instead of first attempting to send
 *				or receive and arm poll if that yields an
 *				-EAGAIN result, arm poll upfront and skip
 *				the initial transfer attempt.
 *
 * IORING_RECV_MULTISHOT	Multishot recv. Sets IORING_CQE_F_MORE if
 *				the handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_RECVSEND_FIXED_BUF	Use registered buffers, the index is stored in
 *				the buf_index field.
 *
 * IORING_SEND_ZC_REPORT_USAGE
 *				If set, SEND[MSG]_ZC should report
 *				the zerocopy usage in cqe.res
 *				for the IORING_CQE_F_NOTIF cqe.
 *				0 is reported if zerocopy was actually possible.
 *				IORING_NOTIF_USAGE_ZC_COPIED if data was copied
 *				(at least partially).
 *
 * IORING_RECVSEND_BUNDLE	Used with IOSQE_BUFFER_SELECT. If set, send or
 *				recv will grab as many buffers from the buffer
 *				group ID given and send them all. The completion
 *				result will be the number of buffers sent, with
 *				the starting buffer ID in cqe->flags as per
 *				usual for provided buffer usage. The buffers
 *				will be contiguous from the starting buffer ID.
 */
#define IORING_RECVSEND_POLL_FIRST	(1U << 0)
#define IORING_RECV_MULTISHOT		(1U << 1)
#define IORING_RECVSEND_FIXED_BUF	(1U << 2)
#define IORING_SEND_ZC_REPORT_USAGE	(1U << 3)
#define IORING_RECVSEND_BUNDLE		(1U << 4)

/*
 * cqe.res for IORING_CQE_F_NOTIF if
 * IORING_SEND_ZC_REPORT_USAGE was requested
 *
 * It should be treated as a flag, all other
 * bits of cqe.res should be treated as reserved!
 */
#define IORING_NOTIF_USAGE_ZC_COPIED	(1U << 31)

/*
 * accept flags stored in sqe->ioprio
 */
#define IORING_ACCEPT_MULTISHOT		(1U << 0)
#define IORING_ACCEPT_DONTWAIT		(1U << 1)
#define IORING_ACCEPT_POLL_FIRST	(1U << 2)
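/*
 * Editor's note, an illustrative sketch rather than part of the UAPI: a
 * multishot receive that picks buffers from a provided-buffer group.
 * IORING_RECV_MULTISHOT goes in sqe->ioprio as described above, buffer
 * selection is requested with IOSQE_BUFFER_SELECT, and sqe->buf_group names
 * the group; the length comes from the selected buffer, so len stays 0.
 * The helper name is hypothetical and memset() assumes <string.h>.
 *
 *	static void example_prep_recv_multishot(struct io_uring_sqe *sqe,
 *						int sockfd, __u16 buf_group,
 *						__u64 user_data)
 *	{
 *		memset(sqe, 0, sizeof(*sqe));
 *		sqe->opcode    = IORING_OP_RECV;
 *		sqe->fd        = sockfd;
 *		sqe->ioprio    = IORING_RECV_MULTISHOT;
 *		sqe->flags     = IOSQE_BUFFER_SELECT;
 *		sqe->buf_group = buf_group;
 *		sqe->user_data = user_data;
 *	}
 */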
/*
 * IORING_OP_MSG_RING command types, stored in sqe->addr
 */
enum io_uring_msg_ring_flags {
	IORING_MSG_DATA,	/* pass sqe->len as 'res' and off as user_data */
	IORING_MSG_SEND_FD,	/* send a registered fd to another ring */
};

/*
 * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
 *
 * IORING_MSG_RING_CQE_SKIP	Don't post a CQE to the target ring. Not
 *				applicable for IORING_MSG_DATA, obviously.
 */
#define IORING_MSG_RING_CQE_SKIP	(1U << 0)
/* Pass through the flags from sqe->file_index to cqe->flags */
#define IORING_MSG_RING_FLAGS_PASS	(1U << 1)

/*
 * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags)
 *
 * IORING_FIXED_FD_NO_CLOEXEC	Don't mark the fd as O_CLOEXEC
 */
#define IORING_FIXED_FD_NO_CLOEXEC	(1U << 0)

/*
 * IORING_OP_NOP flags (sqe->nop_flags)
 *
 * IORING_NOP_INJECT_RESULT	Inject result from sqe->result
 */
#define IORING_NOP_INJECT_RESULT	(1U << 0)
#define IORING_NOP_FILE			(1U << 1)
#define IORING_NOP_FIXED_FILE		(1U << 2)
#define IORING_NOP_FIXED_BUFFER		(1U << 3)

/*
 * IO completion data structure (Completion Queue Entry)
 */
struct io_uring_cqe {
	__u64	user_data;	/* sqe->user_data value passed back */
	__s32	res;		/* result code for this event */
	__u32	flags;

	/*
	 * If the ring is initialized with IORING_SETUP_CQE32, then this field
	 * contains 16 bytes of padding, doubling the size of the CQE.
	 */
	__u64 big_cqe[];
};

/*
 * cqe->flags
 *
 * IORING_CQE_F_BUFFER	If set, the upper 16 bits are the buffer ID
 * IORING_CQE_F_MORE	If set, parent SQE will generate more CQE entries
 * IORING_CQE_F_SOCK_NONEMPTY	If set, more data to read after socket recv
 * IORING_CQE_F_NOTIF	Set for notification CQEs. Can be used to distinguish
 *			them from sends.
 * IORING_CQE_F_BUF_MORE If set, the buffer ID set in the completion will get
 *			more completions. In other words, the buffer is being
 *			partially consumed, and will be used by the kernel for
 *			more completions. This is only set for buffers used via
 *			the incremental buffer consumption, as provided by
 *			a ring buffer setup with IOU_PBUF_RING_INC. For any
 *			other provided buffer type, any buffer passed back in
 *			a completion is automatically returned to the
 *			application.
 */
#define IORING_CQE_F_BUFFER		(1U << 0)
#define IORING_CQE_F_MORE		(1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY	(1U << 2)
#define IORING_CQE_F_NOTIF		(1U << 3)
#define IORING_CQE_F_BUF_MORE		(1U << 4)

#define IORING_CQE_BUFFER_SHIFT		16
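/*
 * Editor's note, an illustrative sketch rather than part of the UAPI:
 * decoding a completion that used a provided buffer. When
 * IORING_CQE_F_BUFFER is set, the chosen buffer ID sits in the upper 16
 * bits of cqe->flags. The helper name is hypothetical.
 *
 *	static int example_cqe_buffer_id(const struct io_uring_cqe *cqe,
 *					 __u16 *bid)
 *	{
 *		if (!(cqe->flags & IORING_CQE_F_BUFFER))
 *			return -1;
 *		*bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		return 0;
 *	}
 */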
/*
 * Magic offsets for the application to mmap the data it needs
 */
#define IORING_OFF_SQ_RING		0ULL
#define IORING_OFF_CQ_RING		0x8000000ULL
#define IORING_OFF_SQES			0x10000000ULL
#define IORING_OFF_PBUF_RING		0x80000000ULL
#define IORING_OFF_PBUF_SHIFT		16
#define IORING_OFF_MMAP_MASK		0xf8000000ULL

/*
 * Filled with the offset for mmap(2)
 */
struct io_sqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 flags;
	__u32 dropped;
	__u32 array;
	__u32 resv1;
	__u64 user_addr;
};

/*
 * sq_ring->flags
 */
#define IORING_SQ_NEED_WAKEUP	(1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW	(1U << 1) /* CQ ring is overflown */
#define IORING_SQ_TASKRUN	(1U << 2) /* task should enter the kernel */

struct io_cqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 overflow;
	__u32 cqes;
	__u32 flags;
	__u32 resv1;
	__u64 user_addr;
};

/*
 * cq_ring->flags
 */

/* disable eventfd notifications */
#define IORING_CQ_EVENTFD_DISABLED	(1U << 0)

/*
 * io_uring_enter(2) flags
 */
#define IORING_ENTER_GETEVENTS		(1U << 0)
#define IORING_ENTER_SQ_WAKEUP		(1U << 1)
#define IORING_ENTER_SQ_WAIT		(1U << 2)
#define IORING_ENTER_EXT_ARG		(1U << 3)
#define IORING_ENTER_REGISTERED_RING	(1U << 4)
#define IORING_ENTER_ABS_TIMER		(1U << 5)
#define IORING_ENTER_EXT_ARG_REG	(1U << 6)
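/*
 * Editor's note, an illustrative sketch rather than part of the UAPI:
 * mapping the SQ ring and the SQE array with the magic offsets above.
 * ring_fd and p are assumed to come from io_uring_setup(2), mmap(2) assumes
 * <sys/mman.h>, and error checking is omitted. With IORING_FEAT_SINGLE_MMAP
 * the CQ ring shares the first mapping; otherwise IORING_OFF_CQ_RING is
 * mapped separately.
 *
 *	size_t sq_sz  = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t sqe_sz = p.sq_entries * sizeof(struct io_uring_sqe);
 *	void *sq_ptr, *sqe_ptr;
 *	unsigned *sq_tail, *sq_array;
 *
 *	sq_ptr  = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	sqe_ptr = mmap(NULL, sqe_sz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
 *	sq_tail  = (unsigned *)((char *)sq_ptr + p.sq_off.tail);
 *	sq_array = (unsigned *)((char *)sq_ptr + p.sq_off.array);
 */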
/*
 * Passed in for io_uring_setup(2). Copied back with updated info on success
 */
struct io_uring_params {
	__u32 sq_entries;
	__u32 cq_entries;
	__u32 flags;
	__u32 sq_thread_cpu;
	__u32 sq_thread_idle;
	__u32 features;
	__u32 wq_fd;
	__u32 resv[3];
	struct io_sqring_offsets sq_off;
	struct io_cqring_offsets cq_off;
};

/*
 * io_uring_params->features flags
 */
#define IORING_FEAT_SINGLE_MMAP		(1U << 0)
#define IORING_FEAT_NODROP		(1U << 1)
#define IORING_FEAT_SUBMIT_STABLE	(1U << 2)
#define IORING_FEAT_RW_CUR_POS		(1U << 3)
#define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
#define IORING_FEAT_FAST_POLL		(1U << 5)
#define IORING_FEAT_POLL_32BITS		(1U << 6)
#define IORING_FEAT_SQPOLL_NONFIXED	(1U << 7)
#define IORING_FEAT_EXT_ARG		(1U << 8)
#define IORING_FEAT_NATIVE_WORKERS	(1U << 9)
#define IORING_FEAT_RSRC_TAGS		(1U << 10)
#define IORING_FEAT_CQE_SKIP		(1U << 11)
#define IORING_FEAT_LINKED_FILE		(1U << 12)
#define IORING_FEAT_REG_REG_RING	(1U << 13)
#define IORING_FEAT_RECVSEND_BUNDLE	(1U << 14)
#define IORING_FEAT_MIN_TIMEOUT		(1U << 15)

/*
 * io_uring_register(2) opcodes and arguments
 */
enum io_uring_register_op {
	IORING_REGISTER_BUFFERS			= 0,
	IORING_UNREGISTER_BUFFERS		= 1,
	IORING_REGISTER_FILES			= 2,
	IORING_UNREGISTER_FILES			= 3,
	IORING_REGISTER_EVENTFD			= 4,
	IORING_UNREGISTER_EVENTFD		= 5,
	IORING_REGISTER_FILES_UPDATE		= 6,
	IORING_REGISTER_EVENTFD_ASYNC		= 7,
	IORING_REGISTER_PROBE			= 8,
	IORING_REGISTER_PERSONALITY		= 9,
	IORING_UNREGISTER_PERSONALITY		= 10,
	IORING_REGISTER_RESTRICTIONS		= 11,
	IORING_REGISTER_ENABLE_RINGS		= 12,

	/* extended with tagging */
	IORING_REGISTER_FILES2			= 13,
	IORING_REGISTER_FILES_UPDATE2		= 14,
	IORING_REGISTER_BUFFERS2		= 15,
	IORING_REGISTER_BUFFERS_UPDATE		= 16,

	/* set/clear io-wq thread affinities */
	IORING_REGISTER_IOWQ_AFF		= 17,
	IORING_UNREGISTER_IOWQ_AFF		= 18,

	/* set/get max number of io-wq workers */
	IORING_REGISTER_IOWQ_MAX_WORKERS	= 19,

	/* register/unregister io_uring fd with the ring */
	IORING_REGISTER_RING_FDS		= 20,
	IORING_UNREGISTER_RING_FDS		= 21,

	/* register ring based provide buffer group */
	IORING_REGISTER_PBUF_RING		= 22,
	IORING_UNREGISTER_PBUF_RING		= 23,

	/* sync cancelation API */
	IORING_REGISTER_SYNC_CANCEL		= 24,

	/* register a range of fixed file slots for automatic slot allocation */
	IORING_REGISTER_FILE_ALLOC_RANGE	= 25,

	/* return status information for a buffer group */
	IORING_REGISTER_PBUF_STATUS		= 26,

	/* set/clear busy poll settings */
	IORING_REGISTER_NAPI			= 27,
	IORING_UNREGISTER_NAPI			= 28,

	IORING_REGISTER_CLOCK			= 29,

	/* clone registered buffers from source ring to current ring */
	IORING_REGISTER_CLONE_BUFFERS		= 30,

	/* send MSG_RING without having a ring */
	IORING_REGISTER_SEND_MSG_RING		= 31,

	/* 32 reserved for zc rx */

	/* resize CQ ring */
	IORING_REGISTER_RESIZE_RINGS		= 33,

	IORING_REGISTER_MEM_REGION		= 34,

	/* this goes last */
	IORING_REGISTER_LAST,

	/* flag added to the opcode to use a registered ring fd */
	IORING_REGISTER_USE_REGISTERED_RING	= 1U << 31
};

/* io-wq worker categories */
enum io_wq_type {
	IO_WQ_BOUND,
	IO_WQ_UNBOUND,
};

/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 /* __s32 * */ fds;
};
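/*
 * Editor's note, an illustrative sketch rather than part of the UAPI:
 * registering a small fixed buffer table through the raw register syscall.
 * ring_fd, buf0 and buf1 are assumed to exist, struct iovec needs
 * <sys/uio.h>, and syscall(2) needs <unistd.h> and <sys/syscall.h>;
 * liburing wraps this as io_uring_register_buffers(). Once registered, the
 * slots can be used by IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED via
 * sqe->buf_index.
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf0, .iov_len = 4096 },
 *		{ .iov_base = buf1, .iov_len = 4096 },
 *	};
 *	int ret;
 *
 *	ret = syscall(__NR_io_uring_register, ring_fd,
 *		      IORING_REGISTER_BUFFERS, iov, 2);
 */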
enum {
	/* initialise with user provided memory pointed to by user_addr */
	IORING_MEM_REGION_TYPE_USER		= 1,
};

struct io_uring_region_desc {
	__u64 user_addr;
	__u64 size;
	__u32 flags;
	__u32 id;
	__u64 mmap_offset;
	__u64 __resv[4];
};

enum {
	/* expose the region as registered wait arguments */
	IORING_MEM_REGION_REG_WAIT_ARG		= 1,
};

struct io_uring_mem_region_reg {
	__u64 region_uptr; /* struct io_uring_region_desc * */
	__u64 flags;
	__u64 __resv[2];
};

/*
 * Register a fully sparse file space, rather than pass in an array of all
 * -1 file descriptors.
 */
#define IORING_RSRC_REGISTER_SPARSE	(1U << 0)

struct io_uring_rsrc_register {
	__u32 nr;
	__u32 flags;
	__u64 resv2;
	__aligned_u64 data;
	__aligned_u64 tags;
};

struct io_uring_rsrc_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
};

struct io_uring_rsrc_update2 {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
	__aligned_u64 tags;
	__u32 nr;
	__u32 resv2;
};

/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP	(-2)

#define IO_URING_OP_SUPPORTED	(1U << 0)

struct io_uring_probe_op {
	__u8 op;
	__u8 resv;
	__u16 flags;	/* IO_URING_OP_* flags */
	__u32 resv2;
};

struct io_uring_probe {
	__u8 last_op;	/* last opcode supported */
	__u8 ops_len;	/* length of ops[] array below */
	__u16 resv;
	__u32 resv2[3];
	struct io_uring_probe_op ops[];
};

struct io_uring_restriction {
	__u16 opcode;
	union {
		__u8 register_op;	/* IORING_RESTRICTION_REGISTER_OP */
		__u8 sqe_op;		/* IORING_RESTRICTION_SQE_OP */
		__u8 sqe_flags;		/* IORING_RESTRICTION_SQE_FLAGS_* */
	};
	__u8 resv;
	__u32 resv2[3];
};

struct io_uring_clock_register {
	__u32	clockid;
	__u32	__resv[3];
};

enum {
	IORING_REGISTER_SRC_REGISTERED	= (1U << 0),
	IORING_REGISTER_DST_REPLACE	= (1U << 1),
};

struct io_uring_clone_buffers {
	__u32	src_fd;
	__u32	flags;
	__u32	src_off;
	__u32	dst_off;
	__u32	nr;
	__u32	pad[3];
};

struct io_uring_buf {
	__u64	addr;
	__u32	len;
	__u16	bid;
	__u16	resv;
};

struct io_uring_buf_ring {
	union {
		/*
		 * To avoid spilling into more pages than we need to, the
		 * ring tail is overlaid with the io_uring_buf->resv field.
		 */
		struct {
			__u64	resv1;
			__u32	resv2;
			__u16	resv3;
			__u16	tail;
		};
		__DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
	};
};
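/*
 * Editor's note, an illustrative sketch rather than part of the UAPI:
 * adding one buffer to a provided buffer ring (sized to a power of two)
 * and publishing it to the kernel. The helper name is hypothetical, and a
 * real implementation would publish the new tail with a release store
 * (e.g. __atomic_store_n(..., __ATOMIC_RELEASE)) rather than a plain
 * assignment.
 *
 *	static void example_buf_ring_add(struct io_uring_buf_ring *br,
 *					 unsigned int ring_entries, __u16 tail,
 *					 void *addr, __u32 len, __u16 bid)
 *	{
 *		struct io_uring_buf *buf = &br->bufs[tail & (ring_entries - 1)];
 *
 *		buf->addr = (__u64)(unsigned long)addr;
 *		buf->len  = len;
 *		buf->bid  = bid;
 *		br->tail  = tail + 1;
 *	}
 */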
/*
 * Flags for IORING_REGISTER_PBUF_RING.
 *
 * IOU_PBUF_RING_MMAP:	If set, kernel will allocate the memory for the ring.
 *			The application must not set a ring_addr in struct
 *			io_uring_buf_reg, instead it must subsequently call
 *			mmap(2) with the offset set as:
 *			IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
 *			to get a virtual mapping for the ring.
 * IOU_PBUF_RING_INC:	If set, buffers consumed from this buffer ring can be
 *			consumed incrementally. Normally one (or more) buffers
 *			are fully consumed. With incremental consumptions, it's
 *			feasible to register big ranges of buffers, and each
 *			use of it will consume only as much as it needs. This
 *			requires that both the kernel and application keep
 *			track of where the current read/recv index is at.
 */
enum io_uring_register_pbuf_ring_flags {
	IOU_PBUF_RING_MMAP	= 1,
	IOU_PBUF_RING_INC	= 2,
};

/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
	__u64	ring_addr;
	__u32	ring_entries;
	__u16	bgid;
	__u16	flags;
	__u64	resv[3];
};

/* argument for IORING_REGISTER_PBUF_STATUS */
struct io_uring_buf_status {
	__u32	buf_group;	/* input */
	__u32	head;		/* output */
	__u32	resv[8];
};

enum io_uring_napi_op {
	/* register/unregister backward compatible opcode */
	IO_URING_NAPI_REGISTER_OP = 0,

	/* opcodes to update napi_list when static tracking is used */
	IO_URING_NAPI_STATIC_ADD_ID = 1,
	IO_URING_NAPI_STATIC_DEL_ID = 2
};

enum io_uring_napi_tracking_strategy {
	/* value must be 0 for backward compatibility */
	IO_URING_NAPI_TRACKING_DYNAMIC = 0,
	IO_URING_NAPI_TRACKING_STATIC = 1,
	IO_URING_NAPI_TRACKING_INACTIVE = 255
};

/* argument for IORING_(UN)REGISTER_NAPI */
struct io_uring_napi {
	__u32	busy_poll_to;
	__u8	prefer_busy_poll;

	/* a io_uring_napi_op value */
	__u8	opcode;
	__u8	pad[2];

	/*
	 * for IO_URING_NAPI_REGISTER_OP, it is a
	 * io_uring_napi_tracking_strategy value.
	 *
	 * for IO_URING_NAPI_STATIC_ADD_ID/IO_URING_NAPI_STATIC_DEL_ID
	 * it is the napi id to add/del from napi_list.
	 */
	__u32	op_param;
	__u32	resv;
};

/*
 * io_uring_restriction->opcode values
 */
enum io_uring_register_restriction_op {
	/* Allow an io_uring_register(2) opcode */
	IORING_RESTRICTION_REGISTER_OP		= 0,

	/* Allow an sqe opcode */
	IORING_RESTRICTION_SQE_OP		= 1,

	/* Allow sqe flags */
	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,

	/* Require sqe flags (these flags must be set on each submission) */
	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,

	IORING_RESTRICTION_LAST
};

enum {
	IORING_REG_WAIT_TS		= (1U << 0),
};
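/*
 * Editor's note, an illustrative sketch rather than part of the UAPI:
 * restricting a ring created with IORING_SETUP_R_DISABLED to two opcodes
 * before enabling it. ring_fd is assumed, syscall(2) needs <unistd.h> and
 * <sys/syscall.h>, and liburing exposes the same operation as
 * io_uring_register_restrictions(). After this succeeds, the ring would be
 * enabled with IORING_REGISTER_ENABLE_RINGS and only the allowed SQE
 * opcodes are accepted.
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_READ  },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_WRITE },
 *	};
 *	int ret;
 *
 *	ret = syscall(__NR_io_uring_register, ring_fd,
 *		      IORING_REGISTER_RESTRICTIONS, res, 2);
 */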
/*
 * Argument for io_uring_enter(2) with
 * IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG_REG set, where the actual
 * argument is an index into a previously registered fixed wait region
 * described by the below structure.
 */
struct io_uring_reg_wait {
	struct __kernel_timespec	ts;
	__u32				min_wait_usec;
	__u32				flags;
	__u64				sigmask;
	__u32				sigmask_sz;
	__u32				pad[3];
	__u64				pad2[2];
};

/*
 * Argument for io_uring_enter(2) with
 * IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG
 */
struct io_uring_getevents_arg {
	__u64	sigmask;
	__u32	sigmask_sz;
	__u32	min_wait_usec;
	__u64	ts;
};

/*
 * Argument for IORING_REGISTER_SYNC_CANCEL
 */
struct io_uring_sync_cancel_reg {
	__u64				addr;
	__s32				fd;
	__u32				flags;
	struct __kernel_timespec	timeout;
	__u8				opcode;
	__u8				pad[7];
	__u64				pad2[3];
};

/*
 * Argument for IORING_REGISTER_FILE_ALLOC_RANGE
 * The range is specified as [off, off + len)
 */
struct io_uring_file_index_range {
	__u32	off;
	__u32	len;
	__u64	resv;
};

struct io_uring_recvmsg_out {
	__u32 namelen;
	__u32 controllen;
	__u32 payloadlen;
	__u32 flags;
};

/*
 * Argument for IORING_OP_URING_CMD when file is a socket
 */
enum io_uring_socket_op {
	SOCKET_URING_OP_SIOCINQ		= 0,
	SOCKET_URING_OP_SIOCOUTQ,
	SOCKET_URING_OP_GETSOCKOPT,
	SOCKET_URING_OP_SETSOCKOPT,
};

#ifdef __cplusplus
}
#endif

#endif