/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

struct io_uring_cmd_data {
	/* stable copy of the SQE; two entries to cover 128-byte big SQEs */
	struct io_uring_sqe	sqes[2];
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags);

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			       unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and should only use
 * the mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags);

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
					    struct iov_iter *iter, void *ioucmd)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
				     ssize_t res2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
					     void (*task_work_cb)(struct io_uring_cmd *, unsigned),
					     unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
						unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif
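
/*
 * Illustrative sketch only, guarded out so it is never compiled: one way a
 * driver's ->uring_cmd() handler might decode the SQE payload with
 * io_uring_sqe_cmd(), stash per-command state in the 32-byte pdu area via
 * io_uring_cmd_to_pdu(), and queue the command for asynchronous completion.
 * struct example_uapi_cmd, struct example_cmd_pdu, example_uring_cmd() and
 * example_hw_submit() are hypothetical names, not part of any kernel API.
 */
#if 0
struct example_uapi_cmd {
	__u64	addr;
	__u32	len;
	__u32	tag;
};

struct example_cmd_pdu {
	u32	tag;
	int	status;
};

/* hypothetical hardware submission path */
void example_hw_submit(struct io_uring_cmd *ioucmd);

static int example_uring_cmd(struct io_uring_cmd *ioucmd,
			     unsigned int issue_flags)
{
	/* SQE payload, laid out by the driver's own uAPI */
	const struct example_uapi_cmd *uc = io_uring_sqe_cmd(ioucmd->sqe);
	/* BUILD_BUG_ON()s if the pdu type outgrows the 32-byte pdu area */
	struct example_cmd_pdu *pdu =
		io_uring_cmd_to_pdu(ioucmd, struct example_cmd_pdu);

	pdu->tag = uc->tag;
	pdu->status = 0;

	/* hand off to hardware; the completion path posts the CQE later */
	example_hw_submit(ioucmd);
	return -EIOCBQUEUED;
}
#endif
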
/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->task;
}

#endif /* _LINUX_IO_URING_CMD_H */
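
/*
 * Illustrative sketch only, guarded out so it is never compiled: a common
 * driver pattern for completing from hard-IRQ context, where posting a CQE
 * directly is typically not safe, by bouncing to task work with
 * io_uring_cmd_complete_in_task() (or the _lazy variant when the
 * IOU_F_TWQ_LAZY_WAKE batching semantics are acceptable). The callback then
 * runs in task context and passes the issue_flags it receives straight
 * through to io_uring_cmd_done(). example_irq_complete() and
 * example_task_cb() are hypothetical names; struct example_cmd_pdu is the
 * hypothetical pdu type from the sketch above.
 */
#if 0
static void example_task_cb(struct io_uring_cmd *ioucmd,
			    unsigned issue_flags)
{
	struct example_cmd_pdu *pdu =
		io_uring_cmd_to_pdu(ioucmd, struct example_cmd_pdu);

	/* now in task context: safe to post the CQE */
	io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
}

static void example_irq_complete(struct io_uring_cmd *ioucmd, int status)
{
	struct example_cmd_pdu *pdu =
		io_uring_cmd_to_pdu(ioucmd, struct example_cmd_pdu);

	pdu->status = status;
	/* defer the actual completion to task work */
	io_uring_cmd_complete_in_task(ioucmd, example_task_cb);
}
#endif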