// SPDX-License-Identifier: GPL-2.0

#include "linux/io_uring/query.h"

#include "query.h"
#include "io_uring.h"
#include "zcrx.h"

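/*
 * Per-entry scratch buffer: large enough to hold the payload of any
 * supported query opcode, which is also what defines IO_MAX_QUERY_SIZE.
 */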
union io_query_data {
	struct io_uring_query_opcode opcodes;
	struct io_uring_query_zcrx zcrx;
	struct io_uring_query_scq scq;
};

#define IO_MAX_QUERY_SIZE sizeof(union io_query_data)
#define IO_MAX_QUERY_ENTRIES 1000

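/*
 * IO_URING_QUERY_OPCODES: report the number of supported request,
 * register and query opcodes, plus the valid flag masks for ring setup,
 * enter, features and SQEs.
 */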
static ssize_t io_query_ops(union io_query_data *data)
{
	struct io_uring_query_opcode *e = &data->opcodes;

	e->nr_request_opcodes = IORING_OP_LAST;
	e->nr_register_opcodes = IORING_REGISTER_LAST;
	e->feature_flags = IORING_FEAT_FLAGS;
	e->ring_setup_flags = IORING_SETUP_FLAGS;
	e->enter_flags = IORING_ENTER_FLAGS;
	e->sqe_flags = SQE_VALID_FLAGS;
	e->nr_query_opcodes = __IO_URING_QUERY_MAX;
	e->__pad = 0;
	return sizeof(*e);
}

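/*
 * IO_URING_QUERY_ZCRX: report zero-copy receive capabilities: supported
 * registration and area flags, the number of control opcodes, and the
 * rq header size and alignment.
 */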
static ssize_t io_query_zcrx(union io_query_data *data)
{
	struct io_uring_query_zcrx *e = &data->zcrx;

	e->register_flags = ZCRX_REG_IMPORT;
	e->area_flags = IORING_ZCRX_AREA_DMABUF;
	e->nr_ctrl_opcodes = __ZCRX_CTRL_LAST;
	e->rq_hdr_size = sizeof(struct io_uring);
	e->rq_hdr_alignment = L1_CACHE_BYTES;
	e->__resv1 = 0;
	e->__resv2 = 0;
	return sizeof(*e);
}

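/* IO_URING_QUERY_SCQ: report the SQ/CQ ring header size and alignment. */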
static ssize_t io_query_scq(union io_query_data *data)
{
	struct io_uring_query_scq *e = &data->scq;

	e->hdr_size = sizeof(struct io_rings);
	e->hdr_alignment = SMP_CACHE_BYTES;
	return sizeof(*e);
}

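/*
 * Process a single query entry: copy in the user header, validate it,
 * dispatch on ->query_op, then write the payload and the updated header
 * back to userspace. The user-provided size is clamped to
 * IO_MAX_QUERY_SIZE for the kernel-side copy, and only
 * min(user size, result size) bytes are copied back out. On success
 * *next_entry is set to the user pointer of the next header in the
 * chain (0 terminates the list). Per-entry errors are reported through
 * hdr.result rather than the return value; a non-zero return means a
 * copy to or from userspace failed.
 */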
static int io_handle_query_entry(union io_query_data *data, void __user *uhdr,
				 u64 *next_entry)
{
	struct io_uring_query_hdr hdr;
	size_t usize, res_size = 0;
	ssize_t ret = -EINVAL;
	void __user *udata;

	if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
		return -EFAULT;
	usize = hdr.size;
	hdr.size = min(hdr.size, IO_MAX_QUERY_SIZE);
	udata = u64_to_user_ptr(hdr.query_data);

	if (hdr.query_op >= __IO_URING_QUERY_MAX) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (!mem_is_zero(hdr.__resv, sizeof(hdr.__resv)) || hdr.result || !hdr.size)
		goto out;
	if (copy_from_user(data, udata, hdr.size))
		return -EFAULT;

	switch (hdr.query_op) {
	case IO_URING_QUERY_OPCODES:
		ret = io_query_ops(data);
		break;
	case IO_URING_QUERY_ZCRX:
		ret = io_query_zcrx(data);
		break;
	case IO_URING_QUERY_SCQ:
		ret = io_query_scq(data);
		break;
	}

	if (ret >= 0) {
		if (WARN_ON_ONCE(ret > IO_MAX_QUERY_SIZE))
			return -EFAULT;
		res_size = ret;
		ret = 0;
	}
out:
	hdr.result = ret;
	hdr.size = min_t(size_t, usize, res_size);

	if (copy_struct_to_user(udata, usize, data, hdr.size, NULL))
		return -EFAULT;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;
	*next_entry = hdr.next_entry;
	return 0;
}

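/*
 * Entry point for the query interface: walk the user-supplied chain of
 * io_uring_query_hdr entries until a zero next_entry pointer, handling
 * each one in turn. The walk is capped at IO_MAX_QUERY_ENTRIES and
 * checks for fatal signals so a cyclic or overly long chain cannot pin
 * the CPU.
 */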
int io_query(void __user *arg, unsigned nr_args)
{
	union io_query_data entry_buffer;
	void __user *uhdr = arg;
	int ret, nr = 0;

	memset(&entry_buffer, 0, sizeof(entry_buffer));

	if (nr_args)
		return -EINVAL;

	while (uhdr) {
		u64 next_hdr;

		ret = io_handle_query_entry(&entry_buffer, uhdr, &next_hdr);
		if (ret)
			return ret;
		uhdr = u64_to_user_ptr(next_hdr);

		/* Have some limit to avoid a potential cycle */
		if (++nr >= IO_MAX_QUERY_ENTRIES)
			return -ERANGE;
		if (fatal_signal_pending(current))
			return -EINTR;
		cond_resched();
	}
	return 0;
}