xref: /linux/net/ipv4/inet_diag.c (revision 9e8ba5f3ec35cba4fd8a8bebda548c4db2651e40)
1 /*
2  * inet_diag.c	Module for monitoring INET transport protocols sockets.
3  *
4  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
5  *
6  *	This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/fcntl.h>
16 #include <linux/random.h>
17 #include <linux/slab.h>
18 #include <linux/cache.h>
19 #include <linux/init.h>
20 #include <linux/time.h>
21 
22 #include <net/icmp.h>
23 #include <net/tcp.h>
24 #include <net/ipv6.h>
25 #include <net/inet_common.h>
26 #include <net/inet_connection_sock.h>
27 #include <net/inet_hashtables.h>
28 #include <net/inet_timewait_sock.h>
29 #include <net/inet6_hashtables.h>
30 #include <net/netlink.h>
31 
32 #include <linux/inet.h>
33 #include <linux/stddef.h>
34 
35 #include <linux/inet_diag.h>
36 #include <linux/sock_diag.h>
37 
/* Per-protocol diag handlers, indexed by IPPROTO_* value; writes are
 * serialized by inet_diag_table_mutex (see inet_diag_register/unregister). */
static const struct inet_diag_handler **inet_diag_table;
39 
/* Socket identity handed to the filter bytecode interpreter
 * (inet_diag_bc_run()) when matching a socket against a user filter. */
struct inet_diag_entry {
	__be32 *saddr;		/* local address words (1 for IPv4, 4 for IPv6) */
	__be32 *daddr;		/* remote address words */
	u16 sport;		/* local port, host byte order */
	u16 dport;		/* remote port, host byte order */
	u16 family;		/* AF_INET or AF_INET6 */
	u16 userlocks;		/* sk->sk_userlocks, for INET_DIAG_BC_AUTO */
};
48 
/* Reserve an attribute of @attrlen bytes in @skb and return a pointer to
 * its payload.  NOTE: on overflow __RTA_PUT() jumps to a local
 * "rtattr_failure" label, which every user must provide. */
#define INET_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))

/* Protects lookups and updates of inet_diag_table[]. */
static DEFINE_MUTEX(inet_diag_table_mutex);
53 
/*
 * Find the diag handler for @proto, attempting to auto-load its module
 * first, and return it with inet_diag_table_mutex held.  Returns
 * ERR_PTR(-ENOENT) if no handler is registered; the mutex is held even in
 * that case, so callers must always pair this with
 * inet_diag_unlock_handler(), including on error.
 */
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	/* Unlocked peek - only used to decide whether to try a module load. */
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	/* Re-check under the mutex; the module load may have failed. */
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}
66 
67 static inline void inet_diag_unlock_handler(
68 	const struct inet_diag_handler *handler)
69 {
70 	mutex_unlock(&inet_diag_table_mutex);
71 }
72 
/*
 * inet_sk_diag_fill - emit one inet_diag_msg record for a full socket
 * @sk:		socket to report; must NOT be in TCP_TIME_WAIT (timewait
 *		sockets go through inet_twsk_diag_fill() instead)
 * @icsk:	@sk's connection sock, or NULL when the protocol keeps no
 *		connection state (queue/timer fields are then zeroed)
 * @skb:	netlink reply buffer being filled
 * @req:	user request; req->idiag_ext selects optional attributes
 * @pid, @seq, @nlmsg_flags, @unlh: netlink addressing for the reply
 *
 * Returns skb->len on success or -EMSGSIZE if the record did not fit; a
 * partially written record is trimmed off.  NLMSG_PUT and the RTA_PUT
 * family jump to the nlmsg_failure/rtattr_failure labels at the bottom on
 * buffer overflow.
 */
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
			      struct sk_buff *skb, struct inet_diag_req *req,
			      u32 pid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr  *nlh;
	void *info = NULL;
	struct inet_diag_meminfo  *minfo = NULL;
	unsigned char	 *b = skb_tail_pointer(skb);	/* rollback point */
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = nlmsg_flags;

	r = NLMSG_DATA(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	/* Reserve the meminfo attribute now; its fields are filled below. */
	if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
		minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;
	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		const struct ipv6_pinfo *np = inet6_sk(sk);

		/* idiag_src/dst are 4-word arrays; overwrite with the full
		 * 128-bit addresses. */
		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = np->daddr;
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
	}
#endif

	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = sock_i_ino(sk);

	if (minfo) {
		minfo->idiag_rmem = sk_rmem_alloc_get(sk);
		minfo->idiag_wmem = sk->sk_wmem_queued;
		minfo->idiag_fmem = sk->sk_forward_alloc;
		minfo->idiag_tmem = sk_wmem_alloc_get(sk);
	}

	/* No connection state: nothing more than the basic id to report. */
	if (icsk == NULL) {
		r->idiag_rqueue = r->idiag_wqueue = 0;
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	/* idiag_timer encoding: 1 = retransmit (ICSK_TIME_RETRANS),
	 * 4 = zero-window probe (ICSK_TIME_PROBE0), 2 = sk_timer pending,
	 * 0 = no timer armed. */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1)))
		info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));

	/* Congestion-control algorithm name, terminating NUL included. */
	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
		const size_t len = strlen(icsk->icsk_ca_ops->name);

		strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
		       icsk->icsk_ca_ops->name);
	}

	/* Protocol-specific part: queues and the INET_DIAG_INFO payload. */
	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

rtattr_failure:
nlmsg_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
190 
191 static int inet_csk_diag_fill(struct sock *sk,
192 			      struct sk_buff *skb, struct inet_diag_req *req,
193 			      u32 pid, u32 seq, u16 nlmsg_flags,
194 			      const struct nlmsghdr *unlh)
195 {
196 	return inet_sk_diag_fill(sk, inet_csk(sk),
197 			skb, req, pid, seq, nlmsg_flags, unlh);
198 }
199 
/*
 * Emit one inet_diag_msg record for a timewait minisocket.  Most fields
 * have no meaning for timewait sockets and are reported as zero; the
 * timer is always type 3 carrying the remaining timewait lifetime.
 * Returns skb->len or -EMSGSIZE (NLMSG_PUT jumps to nlmsg_failure on
 * overflow).
 */
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, struct inet_diag_req *req,
			       u32 pid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	const unsigned char *previous_tail = skb_tail_pointer(skb);	/* rollback point */
	struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
					 unlh->nlmsg_type, sizeof(*r));

	r = NLMSG_DATA(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	nlh->nlmsg_flags = nlmsg_flags;

	/* Remaining lifetime in jiffies, clamped at zero. */
	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family	      = tw->tw_family;
	r->idiag_retrans      = 0;
	r->id.idiag_if	      = tw->tw_bound_dev_if;
	sock_diag_save_cookie(tw, r->id.idiag_cookie);
	r->id.idiag_sport     = tw->tw_sport;
	r->id.idiag_dport     = tw->tw_dport;
	r->id.idiag_src[0]    = tw->tw_rcv_saddr;
	r->id.idiag_dst[0]    = tw->tw_daddr;
	/* Report the substate rather than TCP_TIME_WAIT itself. */
	r->idiag_state	      = tw->tw_substate;
	r->idiag_timer	      = 3;
	r->idiag_expires      = DIV_ROUND_UP(tmo * 1000, HZ);
	r->idiag_rqueue	      = 0;
	r->idiag_wqueue	      = 0;
	r->idiag_uid	      = 0;
	r->idiag_inode	      = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);

		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
	}
#endif
	nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
	return skb->len;
nlmsg_failure:
	nlmsg_trim(skb, previous_tail);
	return -EMSGSIZE;
}
250 
251 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
252 			struct inet_diag_req *r, u32 pid, u32 seq, u16 nlmsg_flags,
253 			const struct nlmsghdr *unlh)
254 {
255 	if (sk->sk_state == TCP_TIME_WAIT)
256 		return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
257 					   skb, r, pid, seq, nlmsg_flags,
258 					   unlh);
259 	return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
260 }
261 
/*
 * Service an exact-socket request: look the socket up in @hashinfo by the
 * user-supplied 4-tuple, fill a single reply and unicast it back.
 *
 * Returns 0 on success; -EINVAL for an unsupported address family,
 * -ENOENT when no socket matches, -ENOMEM on allocation failure, or the
 * cookie check's error.  The lookup takes a reference on the socket,
 * dropped on every exit path below.
 */
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
		const struct nlmsghdr *nlh, struct inet_diag_req *req)
{
	int err;
	struct sock *sk;
	struct sk_buff *rep;

	err = -EINVAL;
	if (req->sdiag_family == AF_INET) {
		/* The lookup takes packet-style ordering (remote side first),
		 * so the request's dst fields are passed before its src. */
		sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		sk = inet6_lookup(&init_net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
	}
#endif
	else {
		goto out_nosk;
	}

	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	/* Make sure the socket is still the one the user asked about. */
	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	/* Worst-case reply: base message + meminfo + tcp_info + slack. */
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
				     sizeof(struct inet_diag_meminfo) +
				     sizeof(struct tcp_info) + 64)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req,
			   NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		/* The skb was sized for the worst case; overflow is a bug. */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}
	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	/* Drop the reference taken by the lookup; timewait entries have
	 * their own refcounting. */
	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT)
			inet_twsk_put((struct inet_timewait_sock *)sk);
		else
			sock_put(sk);
	}
out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
329 
330 static int inet_diag_get_exact(struct sk_buff *in_skb,
331 			       const struct nlmsghdr *nlh,
332 			       struct inet_diag_req *req)
333 {
334 	const struct inet_diag_handler *handler;
335 	int err;
336 
337 	handler = inet_diag_lock_handler(req->sdiag_protocol);
338 	if (IS_ERR(handler))
339 		err = PTR_ERR(handler);
340 	else
341 		err = handler->dump_one(in_skb, nlh, req);
342 	inet_diag_unlock_handler(handler);
343 
344 	return err;
345 }
346 
/* Return 1 iff the first @bits bits of @a1 and @a2 are equal.  Addresses
 * are arrays of 32-bit big-endian words (1 word for IPv4, 4 for IPv6). */
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int whole = bits >> 5;		/* number of complete 32-bit words */
	int rest  = bits & 0x1f;	/* leftover bits in the next word */

	if (whole && memcmp(a1, a2, whole << 2))
		return 0;

	if (rest) {
		/* Mask selecting the top @rest bits of the partial word. */
		__be32 mask = htonl(0xffffffff << (32 - rest));

		if ((a1[whole] ^ a2[whole]) & mask)
			return 0;
	}

	return 1;
}
372 
373 
/*
 * Execute user filter bytecode against one socket identity.
 *
 * The program is a flat array of struct inet_diag_bc_op.  Each op is
 * evaluated to a yes/no verdict; on "yes" the walk advances op->yes
 * bytes, on "no" op->no bytes.  The walk stops when len reaches (or
 * passes) zero, and the socket matches iff it ends exactly at zero.
 * The bytecode must have passed inet_diag_bc_audit() beforehand so that
 * every offset and operand stays inside the buffer.
 */
static int inet_diag_bc_run(const struct nlattr *_bc,
		const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			/* Unconditional jump: always take the "no" branch. */
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			/* Port bounds are stored in the next op's "no" field. */
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			/* Match sockets whose port was NOT explicitly bound. */
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			/* Host condition: an inet_diag_hostcond follows the op. */
			struct inet_diag_hostcond *cond;
			__be32 *addr;

			cond = (struct inet_diag_hostcond *)(op + 1);
			/* port == -1 means "any port". */
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					     entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			/* A zero prefix matches every address. */
			if (cond->prefix_len == 0)
				break;

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			/* Let an IPv4 condition match the v4-mapped
			 * (::ffff:a.b.c.d) address of an IPv6 socket. */
			if (entry->family == AF_INET6 &&
			    cond->family == AF_INET) {
				if (addr[0] == 0 && addr[1] == 0 &&
				    addr[2] == htonl(0xffff) &&
				    bitstring_match(addr + 3, cond->addr,
						    cond->prefix_len))
					break;
			}
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}
452 
/*
 * Build an inet_diag_entry view of @sk and run the filter bytecode @bc
 * over it.  Returns nonzero when the socket matches (or when there is no
 * filter at all).  Ports are stored in host byte order: sport comes from
 * inet_num (already host order) and dport goes through ntohs(), matching
 * what inet_diag_bc_run() compares against.
 */
int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_diag_entry entry;
	struct inet_sock *inet = inet_sk(sk);

	if (bc == NULL)
		return 1;

	entry.family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
	if (entry.family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		entry.saddr = np->rcv_saddr.s6_addr32;
		entry.daddr = np->daddr.s6_addr32;
	} else
#endif
	{
		entry.saddr = &inet->inet_rcv_saddr;
		entry.daddr = &inet->inet_daddr;
	}
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.userlocks = sk->sk_userlocks;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
481 
/*
 * Check that the jump target @cc (counted in bytes back from the end of
 * the program) lands exactly on an instruction boundary when walking the
 * "yes" chain from the start of @bc.  Returns 1 if valid, 0 otherwise.
 */
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		/* Target beyond the remaining program: invalid. */
		if (cc > len)
			return 0;
		/* Landed exactly on an op boundary: valid. */
		if (cc == len)
			return 1;
		/* Ops are at least 4 bytes long and 4-byte aligned. */
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc  += op->yes;
	}
	return 0;
}

/* Validate an INET_DIAG_BC_S_COND/D_COND condition: the trailing
 * inet_diag_hostcond plus its address must fit inside the remaining
 * program, and the prefix length must not exceed the address size.
 * On success *min_len is raised to the condition's total size so the
 * branch-offset checks below cannot jump into the operand. */
static int valid_hostcond(const struct inet_diag_bc_op *op, int len,
			  int *min_len)
{
	struct inet_diag_hostcond *cond;
	int addr_len;

	*min_len += sizeof(struct inet_diag_hostcond);
	if (len < *min_len)
		return 0;
	cond = (struct inet_diag_hostcond *)(op + 1);

	switch (cond->family) {
	case AF_UNSPEC:
		addr_len = 0;
		break;
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
	default:
		return 0;
	}
	*min_len += addr_len;
	if (len < *min_len)
		return 0;

	/* prefix_len is in bits, addr_len in bytes. */
	if (cond->prefix_len > 8 * addr_len)
		return 0;

	return 1;
}

/* Port comparisons read their bound from a follow-on inet_diag_bc_op
 * (op[1].no in inet_diag_bc_run()), so one extra op must fit inside the
 * remaining program. */
static int valid_port_comparison(const struct inet_diag_bc_op *op,
				 int len, int *min_len)
{
	*min_len += sizeof(struct inet_diag_bc_op);
	if (len < *min_len)
		return 0;
	return 1;
}

/*
 * Audit user-supplied filter bytecode before inet_diag_bc_run() may ever
 * execute it: every opcode must be known, every operand must lie fully
 * inside the program, and every branch must land on an instruction
 * boundary.  Previously only the branch offsets were checked, so a
 * condition whose operand extended past the end of the attribute would
 * pass the audit and make the interpreter read out of bounds.
 * Returns 0 if the program is safe to run, -EINVAL otherwise.
 */
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int  len = bytecode_len;

	while (len > 0) {
		const struct inet_diag_bc_op *op = bc;
		int min_len = sizeof(struct inet_diag_bc_op);

		switch (op->code) {
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
			if (!valid_hostcond(op, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (!valid_port_comparison(op, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_JMP:
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}

		/* Only the "yes" branch of a NOP is ever taken. */
		if (op->code != INET_DIAG_BC_NOP) {
			if (op->no < min_len || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
		}

		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc  += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}
535 
536 static int inet_csk_diag_dump(struct sock *sk,
537 			      struct sk_buff *skb,
538 			      struct netlink_callback *cb,
539 			      struct inet_diag_req *r,
540 			      const struct nlattr *bc)
541 {
542 	if (!inet_diag_bc_sk(bc, sk))
543 		return 0;
544 
545 	return inet_csk_diag_fill(sk, skb, r,
546 				  NETLINK_CB(cb->skb).pid,
547 				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
548 }
549 
/*
 * Dump one timewait socket if it passes the (optional) filter bytecode.
 * Timewait minisockets have no struct inet_sock, so the filter entry is
 * built here by hand instead of going through inet_diag_bc_sk().
 */
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
			       struct sk_buff *skb,
			       struct netlink_callback *cb,
			       struct inet_diag_req *r,
			       const struct nlattr *bc)
{
	if (bc != NULL) {
		struct inet_diag_entry entry;

		entry.family = tw->tw_family;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);
			entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
			entry.daddr = tw6->tw_v6_daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &tw->tw_rcv_saddr;
			entry.daddr = &tw->tw_daddr;
		}
		entry.sport = tw->tw_num;
		entry.dport = ntohs(tw->tw_dport);
		/* Timewait sockets carry no user lock flags. */
		entry.userlocks = 0;

		if (!inet_diag_bc_run(bc, &entry))
			return 0;
	}

	return inet_twsk_diag_fill(tw, skb, r,
				   NETLINK_CB(cb->skb).pid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
584 
/*
 * Emit one record for an embryonic (SYN_RECV) request socket hanging off
 * listener @sk.  Returns skb->len on success or -1 when the buffer
 * overflowed (NLMSG_PUT jumps to nlmsg_failure).
 */
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
			      struct request_sock *req, u32 pid, u32 seq,
			      const struct nlmsghdr *unlh)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *inet = inet_sk(sk);
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = NLM_F_MULTI;
	r = NLMSG_DATA(nlh);

	r->idiag_family = sk->sk_family;
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;		/* timer type 1: retransmit */
	r->idiag_retrans = req->retrans;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(req, r->id.idiag_cookie);

	/* Time left until the request expires, clamped at zero. */
	tmo = req->expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = ireq->rmt_port;
	r->id.idiag_src[0] = ireq->loc_addr;
	r->id.idiag_dst[0] = ireq->rmt_addr;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
		*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
	}
#endif
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}
635 
/*
 * Dump the embryonic (SYN_RECV) request sockets of listener @sk.
 *
 * Resume state for interrupted dumps lives in cb->args[3] (syn-table
 * bucket + 1, so 0 means "not started") and cb->args[4] (request index
 * within that bucket).  The listener's syn_wait_lock is read-held for the
 * whole walk.  Returns 0 when the queue is exhausted, or the negative
 * fill error when @skb ran out of room.
 */
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb,
			       struct inet_diag_req *r,
			       const struct nlattr *bc)
{
	struct inet_diag_entry entry;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	/* args[3] stores bucket + 1; undo the bias. */
	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !lopt->qlen)
		goto out;

	/* These entry fields are the same for every request of @sk. */
	if (bc != NULL) {
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				entry.saddr =
#if IS_ENABLED(CONFIG_IPV6)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->loc_addr.s6_addr32 :
#endif
					&ireq->loc_addr;
				entry.daddr =
#if IS_ENABLED(CONFIG_IPV6)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->rmt_addr.s6_addr32 :
#endif
					&ireq->rmt_addr;
				entry.dport = ntohs(ireq->rmt_port);

				if (!inet_diag_bc_run(bc, &entry))
					continue;
			}

			err = inet_diag_fill_req(skb, sk, req,
					       NETLINK_CB(cb->skb).pid,
					       cb->nlh->nlmsg_seq, cb->nlh);
			if (err < 0) {
				/* skb full: remember where to resume. */
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}
718 
/*
 * Walk @hashinfo and dump every socket matching request @r into @skb.
 *
 * Resume state for multi-skb dumps is kept in cb->args[]:
 *   [0] 0 = still walking the listening hash, 1 = established hash
 *   [1] current hash bucket, [2] position within the bucket
 *   [3]/[4] request-socket resume state (see inet_diag_dump_reqs())
 * Listening sockets (and their SYN_RECV requests) are visited first under
 * the per-bucket listening locks, then established and timewait sockets
 * under the per-bucket ehash locks.
 */
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
		struct netlink_callback *cb, struct inet_diag_req *r, struct nlattr *bc)
{
	int i, num;
	int s_i, s_num;

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct sock *sk;
			struct hlist_nulls_node *node;
			struct inet_listen_hashbucket *ilb;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				/* Skip entries dumped in a previous pass. */
				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
						sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				/* A dport filter can never match a listener
				 * itself, and cb->args[3] > 0 means we are
				 * resuming inside this listener's queue. */
				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	/* Done if only listener/SYN_RECV states were requested. */
	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto out;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct sock *sk;
		struct hlist_nulls_node *node;

		num = 0;

		if (hlist_nulls_empty(&head->chain) &&
			hlist_nulls_empty(&head->twchain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			struct inet_sock *inet = inet_sk(sk);

			if (num < s_num)
				goto next_normal;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
					sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		/* Timewait entries share the bucket but live on twchain. */
		if (r->idiag_states & TCPF_TIME_WAIT) {
			struct inet_timewait_sock *tw;

			inet_twsk_for_each(tw, node,
				    &head->twchain) {

				if (num < s_num)
					goto next_dying;
				if (r->sdiag_family != AF_UNSPEC &&
						tw->tw_family != r->sdiag_family)
					goto next_dying;
				if (r->id.idiag_sport != tw->tw_sport &&
				    r->id.idiag_sport)
					goto next_dying;
				if (r->id.idiag_dport != tw->tw_dport &&
				    r->id.idiag_dport)
					goto next_dying;
				if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
					spin_unlock_bh(lock);
					goto done;
				}
next_dying:
				++num;
			}
		}
		spin_unlock_bh(lock);
	}

done:
	/* Save the resume position for the next dump pass. */
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
869 
870 static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
871 		struct inet_diag_req *r, struct nlattr *bc)
872 {
873 	const struct inet_diag_handler *handler;
874 
875 	handler = inet_diag_lock_handler(r->sdiag_protocol);
876 	if (!IS_ERR(handler))
877 		handler->dump(skb, cb, r, bc);
878 	inet_diag_unlock_handler(handler);
879 
880 	return skb->len;
881 }
882 
883 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
884 {
885 	struct nlattr *bc = NULL;
886 	int hdrlen = sizeof(struct inet_diag_req);
887 
888 	if (nlmsg_attrlen(cb->nlh, hdrlen))
889 		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
890 
891 	return __inet_diag_dump(skb, cb, (struct inet_diag_req *)NLMSG_DATA(cb->nlh), bc);
892 }
893 
/* Map a legacy TCPDIAG/DCCPDIAG netlink message type to its IP protocol
 * number; unknown types map to 0 (no handler). */
static inline int inet_diag_type2proto(int type)
{
	if (type == TCPDIAG_GETSOCK)
		return IPPROTO_TCP;
	if (type == DCCPDIAG_GETSOCK)
		return IPPROTO_DCCP;
	return 0;
}
905 
906 static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
907 {
908 	struct inet_diag_req_compat *rc = NLMSG_DATA(cb->nlh);
909 	struct inet_diag_req req;
910 	struct nlattr *bc = NULL;
911 	int hdrlen = sizeof(struct inet_diag_req_compat);
912 
913 	req.sdiag_family = AF_UNSPEC; /* compatibility */
914 	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
915 	req.idiag_ext = rc->idiag_ext;
916 	req.idiag_states = rc->idiag_states;
917 	req.id = rc->id;
918 
919 	if (nlmsg_attrlen(cb->nlh, hdrlen))
920 		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
921 
922 	return __inet_diag_dump(skb, cb, &req, bc);
923 }
924 
925 static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
926 			       const struct nlmsghdr *nlh)
927 {
928 	struct inet_diag_req_compat *rc = NLMSG_DATA(nlh);
929 	struct inet_diag_req req;
930 
931 	req.sdiag_family = rc->idiag_family;
932 	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
933 	req.idiag_ext = rc->idiag_ext;
934 	req.idiag_states = rc->idiag_states;
935 	req.id = rc->id;
936 
937 	return inet_diag_get_exact(in_skb, nlh, &req);
938 }
939 
/*
 * Entry point for legacy TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK messages.
 * Any filter bytecode is audited before the dump starts, since
 * inet_diag_bc_run() trusts its input.  Non-dump requests are answered
 * synchronously through the compat exact-lookup path.
 */
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req_compat);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			/* Reject missing, undersized or malformed bytecode. */
			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}

		return netlink_dump_start(sock_diag_nlsk, skb, nlh,
					  inet_diag_dump_compat, NULL, 0);
	}

	return inet_diag_get_exact_compat(skb, nlh);
}
966 
/*
 * sock_diag entry point for new-style inet_diag requests (the AF_INET and
 * AF_INET6 handlers both point here).  Mirrors the compat path: audit any
 * filter bytecode, then either start a netlink dump or answer a single
 * exact-socket request synchronously.
 */
static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;
			/* Reject missing, undersized or malformed bytecode. */
			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}

		return netlink_dump_start(sock_diag_nlsk, skb, h,
					  inet_diag_dump, NULL, 0);
	}

	return inet_diag_get_exact(skb, h, (struct inet_diag_req *)NLMSG_DATA(h));
}
991 
/* Front-end registration with the generic sock_diag dispatcher: AF_INET
 * requests land in inet_diag_handler_dump(). */
static struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_dump,
};

/* AF_INET6 shares the same entry point; the family inside the request
 * selects the actual lookup. */
static struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_dump,
};
1001 
1002 int inet_diag_register(const struct inet_diag_handler *h)
1003 {
1004 	const __u16 type = h->idiag_type;
1005 	int err = -EINVAL;
1006 
1007 	if (type >= IPPROTO_MAX)
1008 		goto out;
1009 
1010 	mutex_lock(&inet_diag_table_mutex);
1011 	err = -EEXIST;
1012 	if (inet_diag_table[type] == NULL) {
1013 		inet_diag_table[type] = h;
1014 		err = 0;
1015 	}
1016 	mutex_unlock(&inet_diag_table_mutex);
1017 out:
1018 	return err;
1019 }
1020 EXPORT_SYMBOL_GPL(inet_diag_register);
1021 
1022 void inet_diag_unregister(const struct inet_diag_handler *h)
1023 {
1024 	const __u16 type = h->idiag_type;
1025 
1026 	if (type >= IPPROTO_MAX)
1027 		return;
1028 
1029 	mutex_lock(&inet_diag_table_mutex);
1030 	inet_diag_table[type] = NULL;
1031 	mutex_unlock(&inet_diag_table_mutex);
1032 }
1033 EXPORT_SYMBOL_GPL(inet_diag_unregister);
1034 
1035 static int __init inet_diag_init(void)
1036 {
1037 	const int inet_diag_table_size = (IPPROTO_MAX *
1038 					  sizeof(struct inet_diag_handler *));
1039 	int err = -ENOMEM;
1040 
1041 	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
1042 	if (!inet_diag_table)
1043 		goto out;
1044 
1045 	err = sock_diag_register(&inet_diag_handler);
1046 	if (err)
1047 		goto out_free_nl;
1048 
1049 	err = sock_diag_register(&inet6_diag_handler);
1050 	if (err)
1051 		goto out_free_inet;
1052 
1053 	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
1054 out:
1055 	return err;
1056 
1057 out_free_inet:
1058 	sock_diag_unregister(&inet_diag_handler);
1059 out_free_nl:
1060 	kfree(inet_diag_table);
1061 	goto out;
1062 }
1063 
/* Module exit: detach from sock_diag (both families, then the compat
 * hook) before freeing the handler table. */
static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}
1071 
module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
/* Aliases let request_module() auto-load this module when a
 * NETLINK_SOCK_DIAG request for AF_INET (2) / AF_INET6 (10) arrives; the
 * numeric values match the format string in inet_diag_lock_handler(). */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);
1077