xref: /linux/io_uring/query.c (revision ac9f4f306d943c1c551280977ae5321167835088)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "linux/io_uring/query.h"
4 
5 #include "query.h"
6 #include "io_uring.h"
7 
8 #define IO_MAX_QUERY_SIZE		(sizeof(struct io_uring_query_opcode))
9 #define IO_MAX_QUERY_ENTRIES		1000
10 
io_query_ops(void * data)11 static ssize_t io_query_ops(void *data)
12 {
13 	struct io_uring_query_opcode *e = data;
14 
15 	BUILD_BUG_ON(sizeof(*e) > IO_MAX_QUERY_SIZE);
16 
17 	e->nr_request_opcodes = IORING_OP_LAST;
18 	e->nr_register_opcodes = IORING_REGISTER_LAST;
19 	e->feature_flags = IORING_FEAT_FLAGS;
20 	e->ring_setup_flags = IORING_SETUP_FLAGS;
21 	e->enter_flags = IORING_ENTER_FLAGS;
22 	e->sqe_flags = SQE_VALID_FLAGS;
23 	e->nr_query_opcodes = __IO_URING_QUERY_MAX;
24 	e->__pad = 0;
25 	return sizeof(*e);
26 }
27 
/*
 * Process one query entry at user address @uhdr: read the header,
 * dispatch the requested query op into the kernel buffer @data, then
 * write the result payload and updated header back to userspace.
 *
 * Per-op status is reported to userspace via hdr.result; the function's
 * own return value is 0 unless userspace memory could not be accessed
 * (-EFAULT). On success, *next_entry holds the user address of the next
 * header in the chain (0 ends the walk in the caller).
 */
static int io_handle_query_entry(struct io_ring_ctx *ctx,
				 void *data, void __user *uhdr,
				 u64 *next_entry)
{
	struct io_uring_query_hdr hdr;
	size_t usize, res_size = 0;
	ssize_t ret = -EINVAL;
	void __user *udata;

	if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
		return -EFAULT;
	/* Remember the user-supplied size; clamp what we actually copy in. */
	usize = hdr.size;
	hdr.size = min(hdr.size, IO_MAX_QUERY_SIZE);
	udata = u64_to_user_ptr(hdr.query_data);

	if (hdr.query_op >= __IO_URING_QUERY_MAX) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* Reserved bytes must be zero, result must be unset, size non-zero. */
	if (!mem_is_zero(hdr.__resv, sizeof(hdr.__resv)) || hdr.result || !hdr.size)
		goto out;
	if (copy_from_user(data, udata, hdr.size))
		return -EFAULT;

	switch (hdr.query_op) {
	case IO_URING_QUERY_OPCODES:
		ret = io_query_ops(data);
		break;
	}

	/* A non-negative op return is the number of result bytes produced. */
	if (ret >= 0) {
		if (WARN_ON_ONCE(ret > IO_MAX_QUERY_SIZE))
			return -EFAULT;
		res_size = ret;
		ret = 0;
	}
out:
	/* Report the per-op status and the actual result size in the header. */
	hdr.result = ret;
	hdr.size = min_t(size_t, usize, res_size);

	/* copy_struct_to_user() also clears the user tail beyond hdr.size. */
	if (copy_struct_to_user(udata, usize, data, hdr.size, NULL))
		return -EFAULT;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;
	*next_entry = hdr.next_entry;
	return 0;
}
75 
/*
 * Entry point for the query registration opcode: walk the user-provided
 * chain of query headers starting at @arg, handling one entry per node.
 * The walk is bounded by IO_MAX_QUERY_ENTRIES to defuse cyclic chains.
 */
int io_query(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	char entry_buffer[IO_MAX_QUERY_SIZE] = {};
	void __user *cur = arg;
	int nr_entries;

	if (nr_args)
		return -EINVAL;

	for (nr_entries = 0; cur; ) {
		u64 next;
		int err;

		err = io_handle_query_entry(ctx, entry_buffer, cur, &next);
		if (err)
			return err;
		cur = u64_to_user_ptr(next);

		/* Bound the walk so a cyclic chain cannot loop forever. */
		if (++nr_entries >= IO_MAX_QUERY_ENTRIES)
			return -ERANGE;
		if (fatal_signal_pending(current))
			return -EINTR;
		cond_resched();
	}
	return 0;
}
104