// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement sctp diag support.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>

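/*
 * This file provides the inet_diag handler for IPPROTO_SCTP, queried from
 * userspace over NETLINK_SOCK_DIAG.  As a rough usage sketch (assuming an
 * iproute2 "ss" built with SCTP support), a command along the lines of
 * "ss --sctp -a" is expected to exercise the dump paths below.
 */
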
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info);

/* Helper functions to keep the asoc/ep fill code below clean */
static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
					struct sock *sk,
					struct sctp_association *asoc)
{
	union sctp_addr laddr, paddr;
	struct dst_entry *dst;
	struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;

	laddr = list_entry(asoc->base.bind_addr.address_list.next,
			   struct sctp_sockaddr_entry, list)->a;
	paddr = asoc->peer.primary_path->ipaddr;
	dst = asoc->peer.primary_path->dst;

	r->idiag_family = sk->sk_family;
	r->id.idiag_sport = htons(asoc->base.bind_addr.port);
	r->id.idiag_dport = htons(asoc->peer.port);
	r->id.idiag_if = dst ? dst->dev->ifindex : 0;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
		*(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
	} else
#endif
	{
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

		r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
		r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
	}

	r->idiag_state = asoc->state;
	if (timer_pending(t3_rtx)) {
		r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
		r->idiag_retrans = asoc->rtx_data_chunks;
		r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
	}
}

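/*
 * Put the local (bound) addresses into an INET_DIAG_LOCALS attribute, one
 * sockaddr_storage-sized slot per address, zero-padding the space past the
 * union sctp_addr that is actually copied.
 */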
static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
					 struct list_head *address_list)
{
	struct sctp_sockaddr_entry *laddr;
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct nlattr *attr;
	void *info = NULL;

	list_for_each_entry_rcu(laddr, address_list, list)
		addrcnt++;

	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry_rcu(laddr, address_list, list) {
		memcpy(info, &laddr->a, sizeof(laddr->a));
		memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
		info += addrlen;
	}

	return 0;
}

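/*
 * Put the peer's transport addresses into an INET_DIAG_PEERS attribute,
 * one sockaddr_storage-sized slot per transport on the association.
 */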
static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
					struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	struct sctp_transport *from;
	struct nlattr *attr;
	void *info = NULL;

	attr = nla_reserve(skb, INET_DIAG_PEERS,
			   addrlen * asoc->peer.transport_count);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
		memset(info + sizeof(from->ipaddr), 0,
		       addrlen - sizeof(from->ipaddr));
		info += addrlen;
	}

	return 0;
}

/* sctp asoc/ep fill */
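/*
 * Fill one inet_diag_msg, either for an association (when @asoc is set) or
 * for the endpoint's socket itself.  Memory info, sctp_info and the
 * congestion-control name are appended when the corresponding idiag_ext
 * bits are set; the local address list is always included, and the peer
 * address list whenever an association is being dumped.
 */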
static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
			       struct sk_buff *skb,
			       const struct inet_diag_req_v2 *req,
			       struct user_namespace *user_ns,
			       int portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh,
			       bool net_admin)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct list_head *addr_list;
	struct inet_diag_msg *r;
	struct nlmsghdr  *nlh;
	int ext = req->idiag_ext;
	struct sctp_infox infox;
	void *info = NULL;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	r->idiag_timer = 0;
	r->idiag_retrans = 0;
	r->idiag_expires = 0;
	if (asoc) {
		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
	} else {
		inet_diag_msg_common_fill(r, sk);
		r->idiag_state = sk->sk_state;
	}

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
		goto errout;

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
		u32 mem[SK_MEMINFO_VARS];
		int amt;

		if (asoc && asoc->ep->sndbuf_policy)
			amt = asoc->sndbuf_used;
		else
			amt = sk_wmem_alloc_get(sk);
		mem[SK_MEMINFO_WMEM_ALLOC] = amt;
		if (asoc && asoc->ep->rcvbuf_policy)
			amt = atomic_read(&asoc->rmem_alloc);
		else
			amt = sk_rmem_alloc_get(sk);
		mem[SK_MEMINFO_RMEM_ALLOC] = amt;
		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
		mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
		mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
		mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		struct nlattr *attr;

		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 sizeof(struct sctp_info),
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}
	infox.sctpinfo = (struct sctp_info *)info;
	infox.asoc = asoc;
	sctp_diag_get_info(sk, r, &infox);

	addr_list = asoc ? &asoc->base.bind_addr.address_list
			 : &ep->base.bind_addr.address_list;
	if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
		goto errout;

	if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
		if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
			goto errout;

	if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
		goto errout;

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* callback and param */
struct sctp_comm_param {
	struct sk_buff *skb;
	struct netlink_callback *cb;
	const struct inet_diag_req_v2 *r;
	const struct nlmsghdr *nlh;
	bool net_admin;
};

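/*
 * Upper-bound estimate of the attribute space needed to dump a single
 * association; used to size the reply skb in sctp_sock_dump_one().
 */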
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct sctp_sockaddr_entry *laddr;

	list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
				list)
		addrcnt++;

	return	  nla_total_size(sizeof(struct sctp_info))
		+ nla_total_size(addrlen * asoc->peer.transport_count)
		+ nla_total_size(addrlen * addrcnt)
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ inet_diag_msg_attrs_size()
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ 64;
}

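/*
 * Dump a single association in response to an exact-match request.  Called
 * through sctp_transport_lookup_process() with the endpoint and transport
 * matching the requested address pair; if the association has meanwhile
 * moved to a different endpoint, the dump is abandoned with -EAGAIN.
 */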
static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
	struct sctp_association *assoc = tsp->asoc;
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	const struct inet_diag_req_v2 *req = commp->r;
	struct sk_buff *skb = commp->skb;
	struct sk_buff *rep;
	int err;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		return err;

	rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	lock_sock(sk);
	if (ep != assoc->ep) {
		err = -EAGAIN;
		goto out;
	}

	err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
				  NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
				  commp->nlh, commp->net_admin);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto out;
	}
	release_sock(sk);

	return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);

out:
	release_sock(sk);
	kfree_skb(rep);
	return err;
}

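/*
 * Dump every association on the endpoint that owns @tsp.  The endpoint's
 * own record is emitted once per socket (tracked in cb->args[3]), followed
 * by one record per association; cb->args[1] and cb->args[4] implement
 * resumption when the dump spans multiple netlink callbacks.
 */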
static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc;
	int err = 0;

	lock_sock(sk);
	if (ep != tsp->asoc->ep)
		goto release;
	list_for_each_entry(assoc, &ep->asocs, asocs) {
		if (cb->args[4] < cb->args[1])
			goto next;

		if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
		    r->id.idiag_sport)
			goto next;
		if (r->id.idiag_dport != htons(assoc->peer.port) &&
		    r->id.idiag_dport)
			goto next;

		if (!cb->args[3] &&
		    inet_sctp_diag_fill(sk, NULL, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq,
					NLM_F_MULTI, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
		cb->args[3] = 1;

		if (inet_sctp_diag_fill(sk, assoc, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, 0, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
next:
		cb->args[4]++;
	}
	cb->args[1] = 0;
	cb->args[3] = 0;
	cb->args[4] = 0;
release:
	release_sock(sk);
	return err;
}

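/*
 * Filter for sctp_transport_traverse_process(): accept a transport only if
 * it belongs to its endpoint's first association, so each endpoint is
 * handled exactly once, and only if the socket matches the requested
 * address family.
 */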
static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc =
		list_entry(ep->asocs.next, struct sctp_association, asocs);

	/* visit the ep only once while walking the transport hashtable */
	if (tsp->asoc != assoc)
		return 0;

	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
		return 0;

	return 1;
}

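/*
 * Dump one socket while walking the endpoint hashtable, applying the
 * netns, state, address-family and port filters from the request.
 */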
static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct net *net = sock_net(skb->sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;

	if (!net_eq(sock_net(sk), net))
		goto out;

	if (cb->args[4] < cb->args[1])
		goto next;

	if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
		goto next;

	if (r->sdiag_family != AF_UNSPEC &&
	    sk->sk_family != r->sdiag_family)
		goto next;

	if (r->id.idiag_sport != inet->inet_sport &&
	    r->id.idiag_sport)
		goto next;

	if (r->id.idiag_dport != inet->inet_dport &&
	    r->id.idiag_dport)
		goto next;

	if (inet_sctp_diag_fill(sk, NULL, skb, r,
				sk_user_ns(NETLINK_CB(cb->skb).sk),
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				cb->nlh, commp->net_admin) < 0) {
		err = 2;
		goto out;
	}
next:
	cb->args[4]++;
out:
	return err;
}

/* define the functions for sctp_diag_handler */
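/*
 * idiag_get_info callback: report queue usage for an association (receive
 * memory and send buffer in use) or, for a plain socket, its accept
 * backlog counters, and fill a struct sctp_info when one was requested.
 */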
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info)
{
	struct sctp_infox *infox = (struct sctp_infox *)info;

	if (infox->asoc) {
		r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
		r->idiag_wqueue = infox->asoc->sndbuf_used;
	} else {
		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
		r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
	}
	if (infox->sctpinfo)
		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}

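/*
 * Handle an exact-match ("dump one") request: rebuild the local and peer
 * addresses from the request and look up the matching transport, then
 * report its association via sctp_sock_dump_one().
 */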
static int sctp_diag_dump_one(struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *req)
{
	struct sk_buff *skb = cb->skb;
	struct net *net = sock_net(skb->sk);
	const struct nlmsghdr *nlh = cb->nlh;
	union sctp_addr laddr, paddr;
	struct sctp_comm_param commp = {
		.skb = skb,
		.r = req,
		.nlh = nlh,
		.net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
	};

	if (req->sdiag_family == AF_INET) {
		laddr.v4.sin_port = req->id.idiag_sport;
		laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
		laddr.v4.sin_family = AF_INET;

		paddr.v4.sin_port = req->id.idiag_dport;
		paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
		paddr.v4.sin_family = AF_INET;
	} else {
		laddr.v6.sin6_port = req->id.idiag_sport;
		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
		       sizeof(laddr.v6.sin6_addr));
		laddr.v6.sin6_family = AF_INET6;

		paddr.v6.sin6_port = req->id.idiag_dport;
		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
		       sizeof(paddr.v6.sin6_addr));
		paddr.v6.sin6_family = AF_INET6;
	}

	return sctp_transport_lookup_process(sctp_sock_dump_one,
					     net, &laddr, &paddr, &commp);
}

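/*
 * Handle a full dump request: first walk the endpoint hashtable for
 * listening sockets (when TCPF_LISTEN is requested), then walk the
 * transport hashtable to dump the associations, using cb->args[] to
 * resume across netlink callbacks.
 */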
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			   const struct inet_diag_req_v2 *r)
{
	u32 idiag_states = r->idiag_states;
	struct net *net = sock_net(skb->sk);
	struct sctp_comm_param commp = {
		.skb = skb,
		.cb = cb,
		.r = r,
		.net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
	};
	int pos = cb->args[2];

	/* eps hashtable dump
	 * args:
	 * 0 : whether the listening sockets have already been traversed
	 * 1 : the sock position to resume from in this traversal
	 * 4 : scratch counter while walking the list
	 */
	if (cb->args[0] == 0) {
		if (!(idiag_states & TCPF_LISTEN))
			goto skip;
		if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
			goto done;
skip:
		cb->args[0] = 1;
		cb->args[1] = 0;
		cb->args[4] = 0;
	}

	/* asocs dump via the transport hashtable
	 * args:
	 * 1 : the assoc position to resume from in this traversal
	 * 2 : the transport position to resume from in this traversal
	 * 3 : whether the ep info of the current asoc has been dumped
	 * 4 : scratch counter while walking the list
	 * 5 : the sk obtained while traversing the transport list
	 */
	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
		goto done;

	sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
					net, &pos, &commp);
	cb->args[2] = pos;

done:
	cb->args[1] = cb->args[4];
	cb->args[4] = 0;
}

static const struct inet_diag_handler sctp_diag_handler = {
	.dump		 = sctp_diag_dump,
	.dump_one	 = sctp_diag_dump_one,
	.idiag_get_info  = sctp_diag_get_info,
	.idiag_type	 = IPPROTO_SCTP,
	.idiag_info_size = sizeof(struct sctp_info),
};

static int __init sctp_diag_init(void)
{
	return inet_diag_register(&sctp_diag_handler);
}

static void __exit sctp_diag_exit(void)
{
	inet_diag_unregister(&sctp_diag_handler);
}

module_init(sctp_diag_init);
module_exit(sctp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);