/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/blk-mq.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

struct io_uring_cmd_data {
	void			*op_data;
	struct io_uring_sqe	sqes[2];
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)
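
/*
 * Illustrative only: a minimal sketch (not part of this header) of how a
 * driver might keep per-command state in the 32-byte pdu area. The names
 * "struct my_cmd_pdu" and "my_uring_cmd" are hypothetical; the size check in
 * io_uring_cmd_to_pdu() fails the build if the struct outgrows pdu[].
 *
 *	struct my_cmd_pdu {
 *		struct request	*rq;
 *		int		result;
 *	};
 *
 *	static int my_uring_cmd(struct io_uring_cmd *ioucmd,
 *				unsigned int issue_flags)
 *	{
 *		struct my_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(ioucmd, struct my_cmd_pdu);
 *
 *		pdu->result = 0;
 *		...
 *		return -EIOCBQUEUED;
 *	}
 */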

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags);
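
/*
 * Illustrative only: a sketch of importing a registered (fixed) user buffer
 * into an iov_iter for a command submitted with IORING_URING_CMD_FIXED set in
 * sqe->uring_cmd_flags. The surrounding handler, user_addr/user_len and the
 * read-like direction are assumptions made for the example.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(user_addr, user_len, ITER_DEST,
 *					&iter, ioucmd, issue_flags);
 *	if (ret < 0)
 *		return ret;
 *	// @iter now describes the pre-registered buffer pages
 */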

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
			unsigned issue_flags);
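
/*
 * Illustrative only: a hypothetical ->uring_cmd() handler that decodes its
 * payload with io_uring_sqe_cmd() and completes inline. @issue_flags is
 * passed through unchanged, per the rule above; my_cmd_payload and
 * my_do_command() are made-up names for the sketch.
 *
 *	static int my_uring_cmd(struct io_uring_cmd *ioucmd,
 *				unsigned int issue_flags)
 *	{
 *		const struct my_cmd_payload *arg = io_uring_sqe_cmd(ioucmd->sqe);
 *		int ret = my_do_command(arg);
 *
 *		io_uring_cmd_done(ioucmd, ret, 0, issue_flags);
 *		return -EIOCBQUEUED;
 *	}
 */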

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags);
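
/*
 * Illustrative only: a driver that parks a command until a later event might
 * mark it cancelable before returning, so the core can hand it back (via
 * ->uring_cmd() with IO_URING_F_CANCEL) when the ring goes away. The pending
 * list is an assumption for the sketch.
 *
 *	io_uring_cmd_mark_cancelable(ioucmd, issue_flags);
 *	list_add_tail(&my_cmd->node, &my_dev->pending);
 *	return -EIOCBQUEUED;
 */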

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			  struct iov_iter *iter, struct io_uring_cmd *ioucmd,
			  unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		u64 ret2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
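
/*
 * Illustrative only: a sketch of bouncing a completion from hard-irq context
 * to task context, since io_uring_cmd_done() is not generally safe to call
 * directly from interrupt context. The callback and the pdu layout (see the
 * io_uring_cmd_to_pdu() sketch above) are hypothetical.
 *
 *	static void my_cmd_tw_cb(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		struct my_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(ioucmd, struct my_cmd_pdu);
 *
 *		io_uring_cmd_done(ioucmd, pdu->result, 0, issue_flags);
 *	}
 *
 *	// from the driver's irq/completion path:
 *	io_uring_cmd_complete_in_task(ioucmd, my_cmd_tw_cb);
 */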

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->async_data;
}

int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags);
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags);
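
/*
 * Illustrative only: a sketch of exposing an in-flight request's bvecs as a
 * fixed buffer at @index so other SQEs can use it zero-copy, then dropping
 * the registration when the driver is done with it. my_release() and the
 * index choice are assumptions for the example; the @priv passed to the
 * release callback is the registered request.
 *
 *	static void my_release(void *priv)
 *	{
 *		struct request *rq = priv;
 *
 *		// all users of the registered bvecs are gone; it is now
 *		// safe to complete or reuse @rq (driver specific)
 *	}
 *
 *	ret = io_buffer_register_bvec(ioucmd, rq, my_release, index, issue_flags);
 *	...
 *	io_buffer_unregister_bvec(ioucmd, index, issue_flags);
 */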

#endif /* _LINUX_IO_URING_CMD_H */