/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

/* Per-protocol handler table, indexed by IPPROTO_* number (size IPPROTO_MAX,
 * allocated in inet_diag_init()).  Writes are serialized by
 * inet_diag_table_mutex.
 */
static const struct inet_diag_handler **inet_diag_table;

/* Flattened socket identity used by the bytecode filter (inet_diag_bc_run).
 * saddr/daddr point into the socket (or request sock) itself; ports are in
 * host byte order.
 */
struct inet_diag_entry {
	const __be32 *saddr;
	const __be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
};

static DEFINE_MUTEX(inet_diag_table_mutex);

/* Look up the handler registered for @proto, requesting its module first if
 * the slot is empty (the unlocked pre-check is a best-effort read; the
 * authoritative check happens under the mutex below).
 *
 * Lock contract: the table mutex is taken here and is held by the caller
 * until inet_diag_unlock_handler() — even on the ERR_PTR(-ENOENT) path, so
 * callers must (and do) unlock unconditionally.
 */
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

/* Release the table mutex taken by inet_diag_lock_handler().  @handler is
 * unused; it is accepted so lock/unlock calls pair up syntactically.
 */
static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}

/* Fill the identity part of @r (family, ports, bound ifindex, cookie and
 * both addresses) from @sk.  Works for full sockets, timewait and request
 * sockets, since it only touches fields shared via struct sock layout (see
 * twsk_build_assert() and the BUILD_BUG_ON in inet_req_diag_fill()).
 */
static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
	r->idiag_family = sk->sk_family;

	/* sk_num is host order; the wire format wants network order. */
	r->id.idiag_sport = htons(sk->sk_num);
	r->id.idiag_dport = sk->sk_dport;
	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
	} else
#endif
	{
		/* IPv4: zero the full 128-bit fields, then place the
		 * 32-bit addresses in the first word.
		 */
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

		r->id.idiag_src[0] = sk->sk_rcv_saddr;
		r->id.idiag_dst[0] = sk->sk_daddr;
	}
}

/* Upper bound on the reply size for a single-socket (GET_EXACT) answer,
 * used to size the skb in inet_diag_dump_one_icsk().  The trailing +64 is
 * headroom for small attributes not itemized here.
 */
static size_t inet_sk_attr_size(void)
{
	return	  nla_total_size(sizeof(struct tcp_info))
		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
		+ nla_total_size(1) /* INET_DIAG_TOS */
		+ nla_total_size(1) /* INET_DIAG_TCLASS */
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
		+ nla_total_size(TCP_CA_NAME_MAX)
		+ nla_total_size(sizeof(struct tcpvegas_info))
		+ 64;
}

/* Build one netlink message describing full socket @sk, honouring the
 * extension bits in req->idiag_ext.  @icsk may be NULL for non-connection
 * sockets, in which case only the basic message plus the handler's info
 * callback is emitted.
 *
 * Returns 0 on success or -EMSGSIZE when @skb has no room; on error the
 * partially-built message is cancelled (rolled back) so the caller may
 * retry with a larger buffer or stop a dump at a clean boundary.
 */
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct tcp_congestion_ops *ca_ops;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;
	struct inet_diag_msg *r;
	struct nlmsghdr  *nlh;
	struct nlattr *attr;
	void *info = NULL;

	/* NOTE(review): read without inet_diag_table_mutex — presumably all
	 * callers reach here via a path that pinned the handler; confirm.
	 */
	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(!handler);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	/* SHUTDOWN is emitted unconditionally (not gated by idiag_ext). */
	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;

		/* v6-only flag is only meaningful for unconnected states. */
		if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
			goto errout;
	}
#endif

	/* Translate the owning uid into the requester's user namespace. */
	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	/* Non-connection sockets: no timers/cc info, just the protocol's
	 * own info callback.
	 */
	if (!icsk) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

	/* Remaining time of a pending timer, converted to milliseconds
	 * (rounded up).
	 */
#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	/* idiag_timer encoding: 1 = retransmit-class timers,
	 * 4 = zero-window probe, 2 = sk_timer, 0 = none (3 is set by
	 * inet_twsk_diag_fill() for timewait sockets).
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	/* Reserve space for INET_DIAG_INFO now; the handler fills it in
	 * via the idiag_get_info() call below.
	 */
	if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   handler->idiag_info_size);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if (ext & (1 << (INET_DIAG_CONG - 1))) {
		int err = 0;

		/* icsk_ca_ops may change under us; pin it with RCU. */
		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops)
			err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
		rcu_read_unlock();
		if (err < 0)
			goto errout;
	}

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT) {
		/* NOTE(review): these locals shadow the outer 'info' (void *)
		 * and 'attr' (struct nlattr *) — intentional here, but easy
		 * to misread.
		 */
		union tcp_cc_info info;
		size_t sz = 0;
		int attr;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ext, &attr, &info);
		rcu_read_unlock();
		if (sz && nla_put(skb, attr, sz, &info) < 0)
			goto errout;
	}

out:
	nlmsg_end(skb, nlh);
	return 0;

errout:
	/* Roll back everything added since nlmsg_put(). */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

/* Convenience wrapper for full connection sockets: derives the icsk from
 * @sk and delegates to inet_sk_diag_fill().
 */
static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      const struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
				 user_ns, portid, seq, nlmsg_flags, unlh);
}

/* Build a message for a TIME_WAIT (mini) socket.  Most idiag fields do not
 * exist for timewait sockets and are reported as zero; idiag_timer is 3 and
 * idiag_expires is the remaining timewait lifetime in ms.
 */
static int inet_twsk_diag_fill(struct sock *sk,
			       struct sk_buff *skb,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_timer.expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	inet_diag_msg_common_fill(r, sk);
	r->idiag_retrans = 0;

	/* Report the substate (FIN_WAIT2 vs TIME_WAIT), not TCP_TIME_WAIT. */
	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

/* Build a message for a request (SYN_RECV) socket.  Reported state is
 * TCP_SYN_RECV with the retransmit timer (1) and the remaining rsk_timer
 * lifetime.
 */
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = inet_reqsk(sk)->num_retrans;

	/* inet_diag_msg_common_fill() read the cookie through struct sock;
	 * this only works if the reqsk field lies at the same offset.
	 */
	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
		     offsetof(struct sock, sk_cookie));

	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
	r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

/* Dispatch to the right fill routine based on the socket flavour
 * (timewait / request / full socket).
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			const struct inet_diag_req_v2 *r,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill(sk, skb, portid, seq,
					   nlmsg_flags, unlh);

	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return inet_req_diag_fill(sk, skb, portid, seq,
					  nlmsg_flags, unlh);

	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
				  nlmsg_flags, unlh);
}

/* Answer a "get exact socket" request: look the socket up in @hashinfo by
 * the 4-tuple in @req, verify the cookie, and unicast one reply message
 * back to the requester.
 *
 * Note the reversed perspective: the request's dst is our lookup saddr and
 * vice versa (the request describes the remote end as idiag_dst).
 */
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
			    struct sk_buff *in_skb,
			    const struct nlmsghdr *nlh,
			    const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	err = -EINVAL;
	if (req->sdiag_family == AF_INET)
		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6)
		sk = inet6_lookup(net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
#endif
	else
		goto out_nosk;

	err = -ENOENT;
	if (!sk)
		goto out_nosk;

	/* The cookie guards against the socket having been reused since
	 * the caller learned about it.
	 */
	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		/* inet_sk_attr_size() was supposed to be big enough. */
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk)
		sock_gen_put(sk);	/* drop the lookup reference */

out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

/* Resolve the protocol handler (loading its module if needed) and delegate
 * the single-socket query to it.  The handler table mutex is held across
 * the dump_one() call.
 */
static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       const struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else
		err = handler->dump_one(in_skb, nlh, req);
	inet_diag_unlock_handler(handler);

	return err;
}

/* Compare the first @bits bits of two big-endian address arrays.
 * Returns 1 on match, 0 otherwise.  Whole 32-bit words are memcmp'd,
 * the trailing partial word is masked.
 */
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}

/* Interpreter for the user-supplied filter bytecode (already validated by
 * inet_diag_bc_audit()).  Each op evaluates to yes/no and jumps forward by
 * op->yes or op->no; the program accepts the socket iff it runs exactly off
 * the end (len == 0).
 */
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		/* Port comparisons store the operand in the 'no' field of a
		 * follow-on op (op[1]).
		 */
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			const struct inet_diag_hostcond *cond;
			const __be32 *addr;

			cond = (const struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					     entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (cond->family != AF_UNSPEC &&
			    cond->family != entry->family) {
				/* Allow an AF_INET condition to match the
				 * embedded IPv4 part of a v4-mapped IPv6
				 * address (::ffff:a.b.c.d).
				 */
				if (entry->family == AF_INET6 &&
				    cond->family == AF_INET) {
					if (addr[0] == 0 && addr[1] == 0 &&
					    addr[2] == htonl(0xffff) &&
					    bitstring_match(addr + 3,
							    cond->addr,
							    cond->prefix_len))
						break;
				}
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;
			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc  += op->yes;
		} else {
			len -= op->no;
			bc  += op->no;
		}
	}
	return len == 0;
}

/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV)
 */
static void entry_fill_addrs(struct inet_diag_entry *entry,
			     const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
		entry->daddr = sk->sk_v6_daddr.s6_addr32;
	} else
#endif
	{
		entry->saddr = &sk->sk_rcv_saddr;
		entry->daddr = &sk->sk_daddr;
	}
}

/* Evaluate filter bytecode @bc against socket @sk.  A NULL @bc means "no
 * filter" and matches everything.  Returns 1 if the socket should be
 * reported.
 */
int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_entry entry;

	if (!bc)
		return 1;

	entry.family = sk->sk_family;
	entry_fill_addrs(&entry, sk);
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	/* Timewait/request socks have no sk_userlocks field. */
	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);

/* Audit helper: verify that offset @cc (counted back from the end of the
 * program) lands exactly on an op boundary when walking the program by its
 * 'yes' links.  Prevents jumps into the middle of an op.
 */
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc  += op->yes;
	}
	return 0;
}

/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
			   int *min_len)
{
	struct inet_diag_hostcond *cond;
	int addr_len;

	/* Check hostcond space. */
	*min_len += sizeof(struct inet_diag_hostcond);
	if (len < *min_len)
		return false;
	cond = (struct inet_diag_hostcond *)(op + 1);

	/* Check address family and address length. */
	switch (cond->family) {
	case AF_UNSPEC:
		addr_len = 0;
		break;
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
	default:
		return false;
	}
	*min_len += addr_len;
	if (len < *min_len)
		return false;

	/* Check prefix length (in bits) vs address length (in bytes). */
	if (cond->prefix_len > 8 * addr_len)
		return false;

	return true;
}

/* Validate a port comparison operator. */
static bool valid_port_comparison(const struct inet_diag_bc_op *op,
				  int len, int *min_len)
{
	/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
	*min_len += sizeof(struct inet_diag_bc_op);
	if (len < *min_len)
		return false;
	return true;
}

/* Validate user-supplied filter bytecode before it is ever run: every op
 * must have a known code, be fully contained in the buffer, and all
 * yes/no jump targets must be 4-byte aligned, forward-only, and land on op
 * boundaries (valid_cc).  This is the security boundary for
 * inet_diag_bc_run().
 */
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int  len = bytecode_len;

	while (len > 0) {
		int min_len = sizeof(struct inet_diag_bc_op);
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
			if (!valid_hostcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (!valid_port_comparison(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_JMP:
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}

		if (op->code != INET_DIAG_BC_NOP) {
			/* 'no' may point one op past the end (len + 4):
			 * that terminates the program with len < 0, i.e.
			 * "no match".
			 */
			if (op->no < min_len || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
		}

		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc  += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

/* Dump one listening/established csk if it passes the filter.  Returns the
 * fill result (<0 means the skb is full and the dump must pause).
 */
static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *r,
			      const struct nlattr *bc)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_csk_diag_fill(sk, skb, r,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

/* Compile-time proof that timewait socks alias the struct sock/inet_sock
 * field layout that inet_diag_msg_common_fill() and the dump loops rely on.
 */
static void twsk_build_assert(void)
{
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
		     offsetof(struct sock, sk_family));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
		     offsetof(struct inet_sock, inet_num));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
		     offsetof(struct inet_sock, inet_dport));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
		     offsetof(struct inet_sock, inet_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
		     offsetof(struct inet_sock, inet_daddr));

#if IS_ENABLED(CONFIG_IPV6)
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
		     offsetof(struct sock, sk_v6_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
		     offsetof(struct sock, sk_v6_daddr));
#endif
}

/* Walk the SYN table of listening socket @sk and emit one message per
 * pending request sock that matches the request/filter.  Resume position
 * is kept in cb->args[3] (bucket + 1) and cb->args[4] (index in chain).
 * Runs under the listener's syn_wait_lock.
 */
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb,
			       const struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_entry entry;
	int j, s_j, reqnum, s_reqnum;
	struct listen_sock *lopt;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;	/* args[3] stores bucket + 1 */

	entry.family = sk->sk_family;

	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !listen_sock_qlen(lopt))
		goto out;

	if (bc) {
		/* Requests share the listener's local port and locks. */
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->ir_rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				/* Note: entry.sport and entry.userlocks are already set */
				entry_fill_addrs(&entry, req_to_sk(req));
				entry.dport = ntohs(ireq->ir_rmt_port);

				if (!inet_diag_bc_run(bc, &entry))
					continue;
			}

			err = inet_req_diag_fill(req_to_sk(req), skb,
						 NETLINK_CB(cb->skb).portid,
						 cb->nlh->nlmsg_seq,
						 NLM_F_MULTI, cb->nlh);
			if (err < 0) {
				/* skb full: remember where to resume. */
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}

/* Full-table dump: phase 0 (cb->args[0] == 0) walks the listening hash
 * (LISTEN sockets and, optionally, their SYN_RECV requests); phase 1 walks
 * the established hash (which also holds timewait socks).  Resume cursors:
 * args[0] = phase, args[1] = bucket, args[2] = index within bucket,
 * args[3]/args[4] = request-sock position (see inet_diag_dump_reqs).
 */
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
			 struct netlink_callback *cb,
			 const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	struct net *net = sock_net(skb->sk);
	int i, num, s_i, s_num;

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct inet_listen_hashbucket *ilb;
			struct hlist_nulls_node *node;
			struct sock *sk;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
					continue;

				/* Skip entries already sent last round. */
				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				/* args[3] > 0 means we stopped mid-way
				 * through this listener's requests, so the
				 * listener itself was already reported.
				 */
				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	/* Nothing but LISTEN/SYN_RECV requested: skip the ehash walk. */
	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto out;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct hlist_nulls_node *node;
		struct sock *sk;

		num = 0;

		if (hlist_nulls_empty(&head->chain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			int state, res;

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next_normal;
			/* Timewait socks are matched by their substate. */
			state = (sk->sk_state == TCP_TIME_WAIT) ?
				inet_twsk(sk)->tw_substate : sk->sk_state;
			if (!(r->idiag_states & (1 << state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != htons(sk->sk_num) &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != sk->sk_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			twsk_build_assert();

			if (!inet_diag_bc_sk(bc, sk))
				goto next_normal;

			res = sk_diag_fill(sk, skb, r,
					   sk_user_ns(NETLINK_CB(cb->skb).sk),
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   cb->nlh);
			if (res < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		spin_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

/* Dispatch a dump request to the protocol handler; returns skb->len (bytes
 * produced so far, which keeps netlink dumping) on success or a negative
 * error when no handler exists.
 */
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    const struct inet_diag_req_v2 *r,
			    struct nlattr *bc)
{
	const struct inet_diag_handler *handler;
	int err = 0;

	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r, bc);
	else
		err = PTR_ERR(handler);
	inet_diag_unlock_handler(handler);

	return err ? : skb->len;
}

/* Dump entry point for inet_diag_req_v2 requests: extract the optional
 * filter bytecode attribute (already audited at dump start) and dump.
 */
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct nlattr *bc = NULL;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}

/* Map a legacy (v1) message type to a protocol number; 0 means unknown. */
static int inet_diag_type2proto(int type)
{
	switch (type) {
	case TCPDIAG_GETSOCK:
		return IPPROTO_TCP;
	case DCCPDIAG_GETSOCK:
		return IPPROTO_DCCP;
	default:
		return 0;
	}
}

/* Dump entry point for the legacy inet_diag_req format: translate the
 * request into an inet_diag_req_v2 on the stack and reuse the v2 path.
 */
static int inet_diag_dump_compat(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct inet_diag_req *rc = nlmsg_data(cb->nlh);
	int hdrlen = sizeof(struct inet_diag_req);
	struct inet_diag_req_v2 req;
	struct nlattr *bc = NULL;

	req.sdiag_family = AF_UNSPEC; /* compatibility */
	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, &req, bc);
}

/* Legacy single-socket query: translate to v2 and delegate. */
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
				      const struct nlmsghdr *nlh)
{
	struct inet_diag_req *rc = nlmsg_data(nlh);
	struct inet_diag_req_v2 req;

	req.sdiag_family = rc->idiag_family;
	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	return inet_diag_get_exact(in_skb, nlh, &req);
}

/* Receive handler for legacy inet_diag messages: validate the header and
 * (for dumps) audit the filter bytecode before starting a netlink dump;
 * otherwise answer a single-socket query.
 */
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (!attr ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump_compat,
			};
			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
		}
	}

	return inet_diag_get_exact_compat(skb, nlh);
}

/* Receive handler for SOCK_DIAG_BY_FAMILY (v2) messages; same structure as
 * the compat path above but with the v2 header and dump callback.
 */
static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (!attr ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump,
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}

	return inet_diag_get_exact(skb, h, nlmsg_data(h));
}

/* sock_diag get_info callback: append one inet_diag_msg (plus protocol and
 * handler info attributes) for @sk to @skb.  Used by the sock_diag core,
 * not by the request paths above.
 */
static
int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
{
	const struct inet_diag_handler *handler;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	struct inet_diag_msg *r;
	void *info = NULL;
	int err = 0;

	nlh = nlmsg_put(skb, 0, 0, SOCK_DIAG_BY_FAMILY, sizeof(*r), 0);
	if (!nlh)
		return -ENOMEM;

	r = nlmsg_data(nlh);
	memset(r, 0, sizeof(*r));
	inet_diag_msg_common_fill(r, sk);
	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_STREAM)
		r->id.idiag_sport = inet_sk(sk)->inet_sport;
	r->idiag_state = sk->sk_state;

	if ((err = nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	handler = inet_diag_lock_handler(sk->sk_protocol);
	if (IS_ERR(handler)) {
		inet_diag_unlock_handler(handler);
		nlmsg_cancel(skb, nlh);
		return PTR_ERR(handler);
	}

	attr = handler->idiag_info_size
		? nla_reserve(skb, INET_DIAG_INFO, handler->idiag_info_size)
		: NULL;
	if (attr)
		info = nla_data(attr);

	handler->idiag_get_info(sk, r, info);
	inet_diag_unlock_handler(handler);

	nlmsg_end(skb, nlh);
	return 0;
}

static const struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_dump,
	.get_info = inet_diag_handler_get_info,
};

static const struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_dump,
	.get_info = inet_diag_handler_get_info,
};

/* Register a per-protocol diag handler (e.g. TCP, DCCP).  Returns -EEXIST
 * if the protocol slot is already taken.
 */
int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= IPPROTO_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (!inet_diag_table[type]) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

/* Remove a previously registered handler.  The mutex also makes sure no
 * in-flight request is still using it (all users hold the mutex).
 */
void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

/* Module init: allocate the handler table and register the AF_INET and
 * AF_INET6 sock_diag families plus the legacy compat receiver.  Error
 * unwinding: inet6 failure unregisters inet, then the table is freed.
 */
static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);