| /linux/io_uring/ |
| H A D | tctx.c |
    47   struct io_uring_task *tctx = tsk->io_uring;   in __io_uring_free()
    66   tsk->io_uring = NULL;   in __io_uring_free()
    98   task->io_uring = tctx;   in io_uring_alloc_task_context()
    106  struct io_uring_task *tctx = current->io_uring;   in __io_uring_add_tctx_node()
    115  tctx = current->io_uring;   in __io_uring_add_tctx_node()
    158  current->io_uring->last = ctx;   in __io_uring_add_tctx_node_from_submit()
    167  struct io_uring_task *tctx = current->io_uring;   in io_uring_del_tctx_node()
    210  struct io_uring_task *tctx = current->io_uring;   in io_uring_unreg_ringfd()
    280  tctx = current->io_uring;   in io_ringfd_register()
    326  struct io_uring_task *tctx = current->io_uring;   in io_ringfd_unregister()
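Read in order, these tctx.c hits trace one lifecycle: the per-task context is allocated on a task's first submission, cached in current->io_uring, and torn down (tsk->io_uring = NULL) when the task exits. A sketch of the lazy-init step, assuming kernel context; the helper name ensure_task_context() and its body are illustrative, not the actual tctx.c code:

    /* Sketch only: allocate the per-task io_uring context on first use,
     * mirroring the __io_uring_add_tctx_node() hits above. */
    static int ensure_task_context(struct io_ring_ctx *ctx)
    {
        struct io_uring_task *tctx = current->io_uring;

        if (unlikely(!tctx)) {
            int ret = io_uring_alloc_task_context(current, ctx);

            if (unlikely(ret))
                return ret;
            /* the allocator published the context via task->io_uring */
            tctx = current->io_uring;
        }
        /* ... link ctx into tctx's node list, remember it as tctx->last ... */
        return 0;
    }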
|
| H A D | cancel.c |
    112  WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);   in io_try_cancel()
    192  ret = io_async_cancel_one(node->task->io_uring, cd);   in __io_async_cancel()
    299  ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);   in io_sync_cancel()
    323  ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);   in io_sync_cancel()
    492  struct io_uring_task *tctx = node->task->io_uring;   in io_uring_try_cancel_iowq()
    580  struct io_uring_task *tctx = current->io_uring;   in io_uring_cancel_generic()
    589  if (!current->io_uring)   in io_uring_cancel_generic()
    613  current->io_uring,   in io_uring_cancel_generic()
    620  current->io_uring,   in io_uring_cancel_generic()
|
| H A D | sqpoll.c |
    270  struct io_uring_task *tctx = current->io_uring;   in io_sq_tw()
    288  struct io_uring_task *tctx = current->io_uring;   in io_sq_tw_pending()
    303  if (!current->io_uring) {   in io_sq_thread()
    558  ret = io_wq_cpu_affinity(tsk->io_uring, mask);   in io_sqpoll_wq_cpu_affinity()
|
| H A D | Kconfig | 3 # io_uring configuration
|
| H A D | tctx.h | 27 struct io_uring_task *tctx = current->io_uring; in io_uring_add_tctx_node()
|
| H A D | register.c |
    208  ret = io_wq_cpu_affinity(current->io_uring, new_mask);   in __io_register_iowq_aff()
    287  tctx = tsk->io_uring;   in io_register_iowq_max_workers()
    290  tctx = current->io_uring;   in io_register_iowq_max_workers()
    325  tctx = node->task->io_uring;   in io_register_iowq_max_workers()
    845  struct io_uring_task *tctx = current->io_uring;   in io_uring_register_get_file()
|
| H A D | zcrx.h | 48 struct io_uring *rq_ring;
|
| H A D | query.c | 40 e->rq_hdr_size = sizeof(struct io_uring); in io_query_zcrx()
|
| H A D | io_uring.c |
    679   struct io_uring_task *tctx = task->io_uring;   in io_uring_drop_tctx_refs()
    2138  req->tctx = current->io_uring;   in io_init_req()
    2162  current->io_uring->cached_refs++;   in io_init_req()
    2447  current->io_uring->cached_refs += left;   in io_submit_sqes()
    2486  struct io_uring_task *tctx = current->io_uring;   in current_pending_io()
    2969  struct io_uring_task *tctx = current->io_uring;   in io_tctx_exit_cb()
    3019  if (tsk && tsk->io_uring && tsk->io_uring->io_wq)   in io_ring_exit_work()
    3020  io_wq_cancel_cb(tsk->io_uring->io_wq,   in io_ring_exit_work()
    3240  struct io_uring_task *tctx = current->io_uring;   in SYSCALL_DEFINE6()
    3681  tctx = current->io_uring;   in io_uring_create()
|
| H A D | zcrx.c |
    350  offsets->head = offsetof(struct io_uring, head);   in io_fill_zcrx_offsets()
    351  offsets->tail = offsetof(struct io_uring, tail);   in io_fill_zcrx_offsets()
    352  offsets->rqes = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);   in io_fill_zcrx_offsets()
    380  ifq->rq_ring = (struct io_uring *)ptr;   in io_allocate_rbuf_ring()
|
| /linux/tools/include/io_uring/ |
| H A D | mini_liburing.h |
    54   struct io_uring {   struct
    130  struct io_uring *ring,   in io_uring_queue_init()
    152  static inline struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)   in io_uring_get_sqe()
    161  static inline int io_uring_wait_cqe(struct io_uring *ring,   in io_uring_wait_cqe()
    185  static inline int io_uring_submit(struct io_uring *ring)   in io_uring_submit()
    220  static inline void io_uring_queue_exit(struct io_uring *ring)   in io_uring_queue_exit()
    247  static inline int io_uring_register_buffers(struct io_uring *ring,   in io_uring_register_buffers()
    278  static inline void io_uring_cqe_seen(struct io_uring *ring)   in io_uring_cqe_seen()
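Together these helpers form a minimal liburing work-alike, enough for the selftests that include it. A sketch of the usual round trip under that assumption (IORING_OP_NOP as the demo request is my choice; note that this mini io_uring_cqe_seen() takes only the ring, unlike full liburing):

    #include <stdio.h>
    #include <string.h>
    #include <io_uring/mini_liburing.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        if (io_uring_queue_init(4, &ring, 0) < 0)
            return 1;

        sqe = io_uring_get_sqe(&ring);
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_NOP;    /* simplest possible request */

        if (io_uring_submit(&ring) >= 0 && io_uring_wait_cqe(&ring, &cqe) == 0) {
            printf("cqe res=%d\n", cqe->res);
            io_uring_cqe_seen(&ring);   /* mini version: ring only */
        }
        io_uring_queue_exit(&ring);
        return 0;
    }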
|
| /linux/include/linux/ |
| H A D | io_uring.h |
    18  if (current->io_uring)   in io_uring_files_cancel()
    23  if (current->io_uring)   in io_uring_task_cancel()
    28  if (tsk->io_uring)   in io_uring_free()
|
| H A D | io_uring_types.h |
    139  struct io_uring {   struct
    160  struct io_uring sq, cq;   argument
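These two hits agree with the offsetof()/sizeof() uses in zcrx.c above: the kernel-side struct io_uring is just the shared head/tail index pair, embedded twice in the ring context as sq and cq. Roughly (u32 comes from <linux/types.h>):

    /* Layout implied by the hits: two 32-bit ring indices. The producer
     * advances tail, the consumer advances head. */
    struct io_uring {
        u32 head;
        u32 tail;
    };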
|
| /linux/Documentation/block/ |
| H A D | ublk.rst |
    47   ``io_uring`` passthrough command; that is why ublk is also one io_uring based
    48   block driver. It has been observed that using io_uring passthrough command can
    51   done by io_uring, but also the preferred IO handling in ublk server is io_uring
    76   # do anything. all IOs are handled by io_uring
    119  threads & io_uring for handling ublk IO), this command is sent to the
    127  io_uring).
    245  thread should have its own io_uring through which it is notified of new
    259  The following IO commands are communicated via io_uring passthrough command,
    287  the IO notification via io_uring.
    328  ublk zero copy relies on io_uring's fixed kernel buffer, which provides
    [all …]
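The ublk.rst fragments above describe the server side: each server thread owns an io_uring and drives I/O with IORING_OP_URING_CMD passthrough requests against the ublk char device. A hedged sketch of queueing one FETCH_REQ that way, assuming a ring set up with IORING_SETUP_SQE128 so the ublk payload fits in the SQE's inline cmd area; field choices follow <linux/ublk_cmd.h>, and buffer management plus error handling are elided:

    #include <string.h>
    #include <linux/io_uring.h>
    #include <linux/ublk_cmd.h>

    /* Illustrative helper, not taken from the ublk server sources. */
    static void ublk_prep_fetch(struct io_uring_sqe *sqe, int cdev_fd,
                                __u16 q_id, __u16 tag, __u64 buf_addr)
    {
        struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;

        memset(sqe, 0, 2 * sizeof(*sqe));   /* 128-byte SQE (SQE128) */
        sqe->opcode    = IORING_OP_URING_CMD;
        sqe->fd        = cdev_fd;           /* /dev/ublkcN char device */
        sqe->cmd_op    = UBLK_U_IO_FETCH_REQ;
        sqe->user_data = tag;               /* illustrative tagging scheme */

        cmd->q_id = q_id;
        cmd->tag  = tag;
        cmd->addr = buf_addr;               /* per-tag server buffer */
    }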
|
| /linux/tools/testing/selftests/ublk/ |
| H A D | kublk.h |
    24   #include <linux/io_uring.h>
    185  struct io_uring ring;
    198  struct io_uring ring;
    266  struct io_uring *ring = &t->ring;   in ublk_io_alloc_sqes()
|
| H A D | test_common.sh | 289 fio --name=batchjob --filename=/dev/ublkb"${dev_id}" --ioengine=io_uring \
|
| /linux/tools/testing/vsock/ |
| H A D | vsock_uring_test.c |
    64   struct io_uring ring;   in vsock_io_uring_client()
    117  struct io_uring ring;   in vsock_io_uring_server()
|
| /linux/tools/testing/selftests/net/ |
| H A D | io_uring_zerocopy_tx.c |
    18   #include <linux/io_uring.h>
    39   #include <io_uring/mini_liburing.h>
    99   struct io_uring ring;   in do_tx()
    109  error(1, -ret, "io_uring: queue init");   in do_tx()
    116  error(1, -ret, "io_uring: buffer registration");   in do_tx()
|
| /linux/tools/testing/selftests/mm/ |
| H A D | gup_longterm.c | 231 struct io_uring ring; in do_test()
|
| /linux/Documentation/core-api/ |
| H A D | protection-keys.rst | 118 Note that kernel accesses from a kthread (such as io_uring) will use a default
|
| /linux/Documentation/admin-guide/sysctl/ |
| H A D | kernel.rst |
    498  Prevents all processes from creating new io_uring instances. Enabling this
    502  0  All processes can create io_uring instances as normal. This is the
    504  1  io_uring creation is disabled (io_uring_setup() will fail with
    506  Existing io_uring instances can still be used. See the
    508  2  io_uring creation is disabled for all processes. io_uring_setup()
    509  always fails with -EPERM. Existing io_uring instances can still be
    519  to create an io_uring instance. If io_uring_group is set to -1 (the
    521  io_uring instances.
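Per the fragments above, both disabled modes surface to userspace as io_uring_setup() failing with EPERM while existing rings keep working. A small probe built on that documented behaviour (raw syscall, no liburing; the messages and entry count are arbitrary):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    int main(void)
    {
        struct io_uring_params p;
        int fd;

        memset(&p, 0, sizeof(p));
        fd = syscall(__NR_io_uring_setup, 8, &p);
        if (fd < 0) {
            if (errno == EPERM)
                puts("io_uring creation denied (io_uring_disabled=1 or 2?)");
            else
                perror("io_uring_setup");
            return 1;
        }
        puts("io_uring instances can be created");
        close(fd);
        return 0;
    }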
|
| /linux/drivers/block/ |
| H A D | Kconfig |
    332  io_uring based userspace block driver. Together with ublk server, ublk
    346  may help security subsystem to audit io_uring command.
|
| /linux/Documentation/filesystems/fuse/ |
| H A D | fuse-passthrough.rst | 94 **NOTE**: ``io_uring`` solves this similar issue by exposing its "fixed files",
|
| /linux/fs/fuse/ |
| H A D | dev_uring.c |
    1154  if (!enable_uring && !fc->io_uring) {   in fuse_uring_cmd()
    1177  fc->io_uring = 0;   in fuse_uring_cmd()
|
| /linux/include/trace/events/ |
| H A D | io_uring.h | 3 #define TRACE_SYSTEM io_uring
|