xref: /linux/io_uring/cmd_net.c (revision 37a93dd5c49b5fda807fd204edf2547c3493319c)
// SPDX-License-Identifier: GPL-2.0
#include <asm/ioctls.h>
#include <linux/io_uring/net.h>
#include <linux/errqueue.h>
#include <net/sock.h>

#include "uring_cmd.h"
#include "io_uring.h"

static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	const struct io_uring_sqe *sqe = cmd->sqe;
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
	optname = READ_ONCE(sqe->optname);
	optlen = READ_ONCE(sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}
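
/*
 * Userspace sketch (not part of this file): issuing SOCKET_URING_OP_GETSOCKOPT
 * through IORING_OP_URING_CMD. Assumes liburing and its io_uring_prep_cmd_sock()
 * helper; on success the CQE result is the updated option length, matching the
 * return value above.
 *
 *	#include <liburing.h>
 *	#include <sys/socket.h>
 *
 *	static int get_rcvbuf(struct io_uring *ring, int sockfd, int *val)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT,
 *				       sockfd, SOL_SOCKET, SO_RCVBUF,
 *				       val, sizeof(*val));
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;		// optlen on success, -errno on error
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */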

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	const struct io_uring_sqe *sqe = cmd->sqe;
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
	optname = READ_ONCE(sqe->optname);
	optlen = READ_ONCE(sqe->optlen);
	level = READ_ONCE(sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}
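
/*
 * Userspace sketch (not part of this file): SOCKET_URING_OP_SETSOCKOPT. Unlike
 * the getsockopt path above, no SOL_SOCKET restriction is applied here; the
 * level is passed straight to do_sock_setsockopt(). Assumes liburing's
 * io_uring_prep_cmd_sock() helper.
 *
 *	#include <liburing.h>
 *	#include <sys/socket.h>
 *
 *	static int set_reuseaddr(struct io_uring *ring, int sockfd)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int one = 1, ret;
 *
 *		io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT,
 *				       sockfd, SOL_SOCKET, SO_REUSEADDR,
 *				       &one, sizeof(one));
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;		// 0 on success, -errno on failure
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */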

static bool io_process_timestamp_skb(struct io_uring_cmd *cmd, struct sock *sk,
				     struct sk_buff *skb, unsigned issue_flags)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct io_uring_cqe cqe[2];
	struct io_timespec *iots;
	struct timespec64 ts;
	u32 tstype, tskey;
	int ret;

	BUILD_BUG_ON(sizeof(struct io_uring_cqe) != sizeof(struct io_timespec));

	ret = skb_get_tx_timestamp(skb, sk, &ts);
	if (ret < 0)
		return false;

	tskey = serr->ee.ee_data;
	tstype = serr->ee.ee_info;

	/* first CQE: timestamp key in ->res, type and hw bit in ->flags */
	cqe->user_data = 0;
	cqe->res = tskey;
	cqe->flags = IORING_CQE_F_MORE | ctx_cqe32_flags(cmd_to_io_kiocb(cmd)->ctx);
	cqe->flags |= tstype << IORING_TIMESTAMP_TYPE_SHIFT;
	if (ret == SOF_TIMESTAMPING_TX_HARDWARE)
		cqe->flags |= IORING_CQE_F_TSTAMP_HW;

	/* second half of the 32-byte CQE carries the timestamp value */
	iots = (struct io_timespec *)&cqe[1];
	iots->tv_sec = ts.tv_sec;
	iots->tv_nsec = ts.tv_nsec;
	return io_uring_cmd_post_mshot_cqe32(cmd, issue_flags, cqe);
}
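
/*
 * Userspace sketch (not part of this file): decoding one of the CQEs built
 * above on an IORING_SETUP_CQE32 ring. Mirroring the code: ->res carries the
 * timestamp key (ee_data), ->flags carry the timestamp type and the hardware
 * bit, and the second half of the 32-byte CQE holds the struct io_timespec.
 * The plain right shift assumes the type bits sit above everything else
 * stored in ->flags.
 *
 *	#include <stdio.h>
 *	#include <stdbool.h>
 *	#include <liburing.h>
 *
 *	static void print_tx_timestamp(const struct io_uring_cqe *cqe)
 *	{
 *		const struct io_timespec *ts =
 *			(const struct io_timespec *)cqe->big_cqe;
 *		unsigned int type = cqe->flags >> IORING_TIMESTAMP_TYPE_SHIFT;
 *		bool hw = cqe->flags & IORING_CQE_F_TSTAMP_HW;
 *
 *		printf("key %u type %u %s %llu.%09llu\n",
 *		       (unsigned int)cqe->res, type, hw ? "hw" : "sw",
 *		       (unsigned long long)ts->tv_sec,
 *		       (unsigned long long)ts->tv_nsec);
 *	}
 */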

static int io_uring_cmd_timestamp(struct socket *sock,
				  struct io_uring_cmd *cmd,
				  unsigned int issue_flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff_head *q = &sk->sk_error_queue;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head list;
	int ret;

	if (!(issue_flags & IO_URING_F_CQE32))
		return -EINVAL;
	ret = io_cmd_poll_multishot(cmd, issue_flags, EPOLLERR);
	if (unlikely(ret))
		return ret;

	if (skb_queue_empty_lockless(q))
		return -EAGAIN;
	__skb_queue_head_init(&list);

	/* collect payload-free tx timestamp skbs onto a private list */
	scoped_guard(spinlock_irq, &q->lock) {
		skb_queue_walk_safe(q, skb, tmp) {
			/* don't support skbs with payload */
			if (!skb_has_tx_timestamp(skb, sk) || skb->len)
				continue;
			__skb_unlink(skb, q);
			__skb_queue_tail(&list, skb);
		}
	}

	/* post one 32-byte CQE per timestamp; stop if posting fails */
	while (1) {
		skb = skb_peek(&list);
		if (!skb)
			break;
		if (!io_process_timestamp_skb(cmd, sk, skb, issue_flags))
			break;
		__skb_dequeue(&list);
		consume_skb(skb);
	}

	/* splice anything that couldn't be posted back onto the error queue */
	if (!skb_queue_empty(&list)) {
		scoped_guard(spinlock_irqsave, &q->lock)
			skb_queue_splice(&list, q);
	}
	/* wait for more timestamp notifications */
	return -EAGAIN;
}
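
/*
 * Userspace sketch (not part of this file): arming the multishot
 * SOCKET_URING_OP_TX_TIMESTAMP command. The ring must be created with
 * IORING_SETUP_CQE32 (see the check above), and the socket needs
 * SO_TIMESTAMPING configured so that error-queue entries carry no payload
 * (e.g. SOF_TIMESTAMPING_OPT_TSONLY with SOF_TIMESTAMPING_OPT_ID). Reusing
 * liburing's io_uring_prep_cmd_sock() with zeroed sockopt arguments is an
 * assumption; only the fd and cmd_op matter for this command.
 *
 *	#include <liburing.h>
 *	#include <linux/net_tstamp.h>
 *	#include <sys/socket.h>
 *
 *	static int arm_tx_timestamps(struct io_uring *ring, int sockfd)
 *	{
 *		unsigned int val = SOF_TIMESTAMPING_TX_SOFTWARE |
 *				   SOF_TIMESTAMPING_SOFTWARE |
 *				   SOF_TIMESTAMPING_OPT_ID |
 *				   SOF_TIMESTAMPING_OPT_TSONLY;
 *		struct io_uring_sqe *sqe;
 *
 *		if (setsockopt(sockfd, SOL_SOCKET, SO_TIMESTAMPING,
 *			       &val, sizeof(val)))
 *			return -1;
 *
 *		sqe = io_uring_get_sqe(ring);
 *		io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_TX_TIMESTAMP,
 *				       sockfd, 0, 0, NULL, 0);
 *		sqe->user_data = 1;
 *		return io_uring_submit(ring);
 *	}
 */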

static int io_uring_cmd_getsockname(struct socket *sock,
				    struct io_uring_cmd *cmd,
				    unsigned int issue_flags)
{
	const struct io_uring_sqe *sqe = cmd->sqe;
	struct sockaddr __user *uaddr;
	unsigned int peer;
	int __user *ulen;

	if (sqe->ioprio || sqe->__pad1 || sqe->len || sqe->rw_flags)
		return -EINVAL;

	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	ulen = u64_to_user_ptr(READ_ONCE(sqe->addr3));
	peer = READ_ONCE(sqe->optlen);
	if (peer > 1)
		return -EINVAL;
	return do_getsockname(sock, peer, uaddr, ulen);
}
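
/*
 * Userspace sketch (not part of this file): SOCKET_URING_OP_GETSOCKNAME using
 * the raw SQE layout read above: ->addr points at the sockaddr buffer, ->addr3
 * at an int holding the buffer length (updated on completion), and ->optlen
 * selects the local (0) or peer (1) name. Unused fields must stay zero. The
 * prep_getsockname() helper name is hypothetical.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <liburing.h>
 *
 *	static void prep_getsockname(struct io_uring_sqe *sqe, int sockfd,
 *				     struct sockaddr_storage *ss, int *sslen,
 *				     unsigned int peer)
 *	{
 *		memset(sqe, 0, sizeof(*sqe));
 *		sqe->opcode = IORING_OP_URING_CMD;
 *		sqe->fd = sockfd;
 *		sqe->cmd_op = SOCKET_URING_OP_GETSOCKNAME;
 *		sqe->addr = (unsigned long)ss;
 *		sqe->addr3 = (unsigned long)sslen;
 *		sqe->optlen = peer;
 *	}
 */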

int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_TX_TIMESTAMP:
		return io_uring_cmd_timestamp(sock, cmd, issue_flags);
	case SOCKET_URING_OP_GETSOCKNAME:
		return io_uring_cmd_getsockname(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
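
/*
 * Userspace sketch (not part of this file): the queue-size queries take no
 * arguments beyond the socket fd, and the byte count comes back in cqe->res.
 * Assumes liburing's io_uring_prep_cmd_sock() helper with the sockopt
 * arguments left zeroed; SOCKET_URING_OP_SIOCOUTQ works the same way.
 *
 *	#include <liburing.h>
 *
 *	static int queued_rx_bytes(struct io_uring *ring, int sockfd)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
 *		struct io_uring_cqe *cqe;
 *		int ret;
 *
 *		io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SIOCINQ,
 *				       sockfd, 0, 0, NULL, 0);
 *		io_uring_submit(ring);
 *		io_uring_wait_cqe(ring, &cqe);
 *		ret = cqe->res;		// unread bytes, or -errno
 *		io_uring_cqe_seen(ring, cqe);
 *		return ret;
 *	}
 */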