xref: /linux/net/sctp/diag.c (revision 8a79db5e83a5d52c74e6f3c40d6f312cf899213e)
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement sctp diag support.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>

static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info);

/* define some functions to make asoc/ep fill look clean */
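/* Fill an inet_diag_msg from one association: family, ports, addresses,
 * state and the pending T3-rtx timer of the primary path.
 */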
static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
					struct sock *sk,
					struct sctp_association *asoc)
{
	union sctp_addr laddr, paddr;
	struct dst_entry *dst;
	struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;

	laddr = list_entry(asoc->base.bind_addr.address_list.next,
			   struct sctp_sockaddr_entry, list)->a;
	paddr = asoc->peer.primary_path->ipaddr;
	dst = asoc->peer.primary_path->dst;

	r->idiag_family = sk->sk_family;
	r->id.idiag_sport = htons(asoc->base.bind_addr.port);
	r->id.idiag_dport = htons(asoc->peer.port);
	r->id.idiag_if = dst ? dst->dev->ifindex : 0;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
		*(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
	} else
#endif
	{
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

		r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
		r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
	}

	r->idiag_state = asoc->state;
	if (timer_pending(t3_rtx)) {
		r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
		r->idiag_retrans = asoc->rtx_data_chunks;
		r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
	} else {
		r->idiag_timer = 0;
		r->idiag_retrans = 0;
		r->idiag_expires = 0;
	}
}

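/* Dump the socket's bound (local) addresses as one INET_DIAG_LOCALS attribute,
 * each entry padded to sizeof(struct sockaddr_storage).
 */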
static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
					 struct list_head *address_list)
{
	struct sctp_sockaddr_entry *laddr;
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct nlattr *attr;
	void *info = NULL;

	list_for_each_entry_rcu(laddr, address_list, list)
		addrcnt++;

	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry_rcu(laddr, address_list, list) {
		memcpy(info, &laddr->a, sizeof(laddr->a));
		memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
		info += addrlen;
	}

	return 0;
}

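/* Dump the association's peer transport addresses as one INET_DIAG_PEERS
 * attribute, each entry padded to sizeof(struct sockaddr_storage).
 */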
static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
					struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	struct sctp_transport *from;
	struct nlattr *attr;
	void *info = NULL;

	attr = nla_reserve(skb, INET_DIAG_PEERS,
			   addrlen * asoc->peer.transport_count);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
		memset(info + sizeof(from->ipaddr), 0,
		       addrlen - sizeof(from->ipaddr));
		info += addrlen;
	}

	return 0;
}

/* sctp asoc/ep fill: build one diag message for an asoc or a bare ep */
static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
			       struct sk_buff *skb,
			       const struct inet_diag_req_v2 *req,
			       struct user_namespace *user_ns,
			       int portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh,
			       bool net_admin)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct list_head *addr_list;
	struct inet_diag_msg *r;
	struct nlmsghdr  *nlh;
	int ext = req->idiag_ext;
	struct sctp_infox infox;
	void *info = NULL;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	if (asoc) {
		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
	} else {
		inet_diag_msg_common_fill(r, sk);
		r->idiag_state = sk->sk_state;
		r->idiag_timer = 0;
		r->idiag_retrans = 0;
	}

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
		goto errout;

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
		u32 mem[SK_MEMINFO_VARS];
		int amt;

		if (asoc && asoc->ep->sndbuf_policy)
			amt = asoc->sndbuf_used;
		else
			amt = sk_wmem_alloc_get(sk);
		mem[SK_MEMINFO_WMEM_ALLOC] = amt;
		if (asoc && asoc->ep->rcvbuf_policy)
			amt = atomic_read(&asoc->rmem_alloc);
		else
			amt = sk_rmem_alloc_get(sk);
		mem[SK_MEMINFO_RMEM_ALLOC] = amt;
		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
		mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
		mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
		mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		struct nlattr *attr;

		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 sizeof(struct sctp_info),
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}
	infox.sctpinfo = (struct sctp_info *)info;
	infox.asoc = asoc;
	sctp_diag_get_info(sk, r, &infox);

	addr_list = asoc ? &asoc->base.bind_addr.address_list
			 : &ep->base.bind_addr.address_list;
	if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
		goto errout;

	if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
		if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
			goto errout;

	if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
		goto errout;

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* callback and param */
struct sctp_comm_param {
	struct sk_buff *skb;
	struct netlink_callback *cb;
	const struct inet_diag_req_v2 *r;
	const struct nlmsghdr *nlh;
	bool net_admin;
};

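/* Rough upper bound on the reply size needed to dump a single association,
 * counting every attribute inet_sctp_diag_fill() may emit.
 */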
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct sctp_sockaddr_entry *laddr;

	list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
				list)
		addrcnt++;

	return	  nla_total_size(sizeof(struct sctp_info))
		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
		+ nla_total_size(1) /* INET_DIAG_TOS */
		+ nla_total_size(1) /* INET_DIAG_TCLASS */
		+ nla_total_size(4) /* INET_DIAG_MARK */
		+ nla_total_size(4) /* INET_DIAG_CLASS_ID */
		+ nla_total_size(addrlen * asoc->peer.transport_count)
		+ nla_total_size(addrlen * addrcnt)
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ 64;
}

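/* Handler for a "get exact" request: fill one reply for the matching
 * association and unicast it back to the requesting socket.
 */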
static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
{
	struct sctp_association *assoc = tsp->asoc;
	struct sock *sk = tsp->asoc->base.sk;
	struct sctp_comm_param *commp = p;
	struct sk_buff *in_skb = commp->skb;
	const struct inet_diag_req_v2 *req = commp->r;
	const struct nlmsghdr *nlh = commp->nlh;
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	int err;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
	if (!rep)
		goto out;

	lock_sock(sk);
	if (sk != assoc->base.sk) {
		release_sock(sk);
		sk = assoc->base.sk;
		lock_sock(sk);
	}
	err = inet_sctp_diag_fill(sk, assoc, rep, req,
				  sk_user_ns(NETLINK_CB(in_skb).sk),
				  NETLINK_CB(in_skb).portid,
				  nlh->nlmsg_seq, 0, nlh,
				  commp->net_admin);
	release_sock(sk);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}

	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	return err;
}

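/* Dump every association on the transport's endpoint, preceded by one
 * endpoint-level record; cb->args[] carries the resume state between calls.
 */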
static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
{
	struct sctp_endpoint *ep = tsp->asoc->ep;
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc;
	int err = 0;

	lock_sock(sk);
	list_for_each_entry(assoc, &ep->asocs, asocs) {
		if (cb->args[4] < cb->args[1])
			goto next;

		if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
		    r->id.idiag_sport)
			goto next;
		if (r->id.idiag_dport != htons(assoc->peer.port) &&
		    r->id.idiag_dport)
			goto next;

		if (!cb->args[3] &&
		    inet_sctp_diag_fill(sk, NULL, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq,
					NLM_F_MULTI, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
		cb->args[3] = 1;

		if (inet_sctp_diag_fill(sk, assoc, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, 0, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
next:
		cb->args[4]++;
	}
	cb->args[1] = 0;
	cb->args[3] = 0;
	cb->args[4] = 0;
release:
	release_sock(sk);
	return err;
}

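/* Filter run for each transport in the hashtable: skip transports whose
 * association is not the first one on its endpoint, and skip sockets that
 * do not match the requested address family.
 */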
static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
{
	struct sctp_endpoint *ep = tsp->asoc->ep;
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc =
		list_entry(ep->asocs.next, struct sctp_association, asocs);

	/* find the ep only once through the transports by this condition */
	if (tsp->asoc != assoc)
		return 0;

	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
		return 0;

	return 1;
}

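/* Dump one endpoint from the endpoint hashtable, applying the netns, state,
 * family and port filters from the request.
 */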
static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct net *net = sock_net(skb->sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;

	if (!net_eq(sock_net(sk), net))
		goto out;

	if (cb->args[4] < cb->args[1])
		goto next;

	if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
		goto next;

	if (r->sdiag_family != AF_UNSPEC &&
	    sk->sk_family != r->sdiag_family)
		goto next;

	if (r->id.idiag_sport != inet->inet_sport &&
	    r->id.idiag_sport)
		goto next;

	if (r->id.idiag_dport != inet->inet_dport &&
	    r->id.idiag_dport)
		goto next;

	if (inet_sctp_diag_fill(sk, NULL, skb, r,
				sk_user_ns(NETLINK_CB(cb->skb).sk),
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				cb->nlh, commp->net_admin) < 0) {
		err = 2;
		goto out;
	}
next:
	cb->args[4]++;
out:
	return err;
}

/* define the functions for sctp_diag_handler */
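/* Report queue sizes for the asoc (or the listening socket) and, if a buffer
 * was reserved, fill the full sctp_info block.
 */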
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info)
{
	struct sctp_infox *infox = (struct sctp_infox *)info;

	if (infox->asoc) {
		r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
		r->idiag_wqueue = infox->asoc->sndbuf_used;
	} else {
		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
		r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
	}
	if (infox->sctpinfo)
		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}

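/* "Get exact" entry point: build the local and peer addresses from the
 * request and look up the matching transport.
 */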
static int sctp_diag_dump_one(struct sk_buff *in_skb,
			      const struct nlmsghdr *nlh,
			      const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	union sctp_addr laddr, paddr;
	struct sctp_comm_param commp = {
		.skb = in_skb,
		.r = req,
		.nlh = nlh,
		.net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
	};

	if (req->sdiag_family == AF_INET) {
		laddr.v4.sin_port = req->id.idiag_sport;
		laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
		laddr.v4.sin_family = AF_INET;

		paddr.v4.sin_port = req->id.idiag_dport;
		paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
		paddr.v4.sin_family = AF_INET;
	} else {
		laddr.v6.sin6_port = req->id.idiag_sport;
		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
		       sizeof(laddr.v6.sin6_addr));
		laddr.v6.sin6_family = AF_INET6;

		paddr.v6.sin6_port = req->id.idiag_dport;
		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
		       sizeof(paddr.v6.sin6_addr));
		paddr.v6.sin6_family = AF_INET6;
	}

	return sctp_transport_lookup_process(sctp_tsp_dump_one,
					     net, &laddr, &paddr, &commp);
}

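/* Full dump entry point: walk the endpoint hashtable for listening socks
 * first, then the transport hashtable for established associations.
 */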
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			   const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	u32 idiag_states = r->idiag_states;
	struct net *net = sock_net(skb->sk);
	struct sctp_comm_param commp = {
		.skb = skb,
		.cb = cb,
		.r = r,
		.net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
	};
	int pos = cb->args[2];

	/* eps hashtable dump
	 * args:
	 * 0 : whether the listening socks have been traversed
	 * 1 : to record the sock position reached by the previous traversal
	 * 4 : temporary variable used while traversing the list
	 */
	if (cb->args[0] == 0) {
		if (!(idiag_states & TCPF_LISTEN))
			goto skip;
		if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
			goto done;
skip:
		cb->args[0] = 1;
		cb->args[1] = 0;
		cb->args[4] = 0;
	}

	/* asocs by transport hashtable dump
	 * args:
	 * 1 : to record the assoc position reached by the previous traversal
	 * 2 : to record the transport position reached by the previous traversal
	 * 3 : to mark whether the ep info of the current asoc has been dumped
	 * 4 : temporary variable used while traversing the list
	 * 5 : to save the sk we get from traversing the tsp list
	 */
	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
		goto done;

	sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
				net, &pos, &commp);
	cb->args[2] = pos;

done:
	cb->args[1] = cb->args[4];
	cb->args[4] = 0;
}

static const struct inet_diag_handler sctp_diag_handler = {
	.dump		 = sctp_diag_dump,
	.dump_one	 = sctp_diag_dump_one,
	.idiag_get_info  = sctp_diag_get_info,
	.idiag_type	 = IPPROTO_SCTP,
	.idiag_info_size = sizeof(struct sctp_info),
};

static int __init sctp_diag_init(void)
{
	return inet_diag_register(&sctp_diag_handler);
}

static void __exit sctp_diag_exit(void)
{
	inet_diag_unregister(&sctp_diag_handler);
}

module_init(sctp_diag_init);
module_exit(sctp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);