/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Version:	$Id: inet_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>

static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	u32 *saddr;
	u32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
};

static struct sock *idiagnl;

#define INET_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
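/*
 * Fill one inet_diag_msg for a full (non-TIME_WAIT) socket: its identity
 * (addresses, ports, cookie), current timer state, and, when requested via
 * the ext bitmap, memory info, protocol info and the congestion control
 * algorithm name.
 */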
static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      int ext, u32 pid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	void *info = NULL;
	struct inet_diag_meminfo *minfo = NULL;
	unsigned char *b = skb->tail;
	const struct inet_diag_handler *handler;

	handler = inet_diag_table[unlh->nlmsg_type];
	BUG_ON(handler == NULL);

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = nlmsg_flags;

	r = NLMSG_DATA(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
		minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));

	if (ext & (1 << (INET_DIAG_INFO - 1)))
		info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
				     handler->idiag_info_size);

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
		const size_t len = strlen(icsk->icsk_ca_ops->name);

		strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
		       icsk->icsk_ca_ops->name);
	}

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
	r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);

	r->id.idiag_sport = inet->sport;
	r->id.idiag_dport = inet->dport;
	r->id.idiag_src[0] = inet->rcv_saddr;
	r->id.idiag_dst[0] = inet->daddr;

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	if (r->idiag_family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
			       &np->rcv_saddr);
		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
			       &np->daddr);
	}
#endif

#define EXPIRES_IN_MS(tmo)  ((tmo - jiffies) * 1000 + HZ - 1) / HZ

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = sock_i_ino(sk);

	if (minfo) {
		minfo->idiag_rmem = atomic_read(&sk->sk_rmem_alloc);
		minfo->idiag_wmem = sk->sk_wmem_queued;
		minfo->idiag_fmem = sk->sk_forward_alloc;
		minfo->idiag_tmem = atomic_read(&sk->sk_wmem_alloc);
	}

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

rtattr_failure:
nlmsg_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, int ext, u32 pid,
			       u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	const unsigned char *previous_tail = skb->tail;
	struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
					 unlh->nlmsg_type, sizeof(*r));

	r = NLMSG_DATA(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	nlh->nlmsg_flags = nlmsg_flags;

	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family = tw->tw_family;
	r->idiag_state = tw->tw_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;
	r->id.idiag_if = tw->tw_bound_dev_if;
	r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
	r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
	r->id.idiag_sport = tw->tw_sport;
	r->id.idiag_dport = tw->tw_dport;
	r->id.idiag_src[0] = tw->tw_rcv_saddr;
	r->id.idiag_dst[0] = tw->tw_daddr;
	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ;
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);

		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
			       &tw6->tw_v6_rcv_saddr);
		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
			       &tw6->tw_v6_daddr);
	}
#endif
	nlh->nlmsg_len = skb->tail - previous_tail;
	return skb->len;
nlmsg_failure:
	skb_trim(skb, previous_tail - skb->data);
	return -1;
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			int ext, u32 pid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
					   skb, ext, pid, seq, nlmsg_flags,
					   unlh);
	return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
}
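/*
 * Answer a single-socket query: look the socket up by its 4-tuple in the
 * handler's hash tables and, unless the caller passed INET_DIAG_NOCOOKIE in
 * both cookie words, verify the kernel-assigned socket cookie before
 * unicasting the reply.
 */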
static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh)
{
	int err;
	struct sock *sk;
	struct inet_diag_req *req = NLMSG_DATA(nlh);
	struct sk_buff *rep;
	struct inet_hashinfo *hashinfo;
	const struct inet_diag_handler *handler;

	handler = inet_diag_table[nlh->nlmsg_type];
	BUG_ON(handler == NULL);
	hashinfo = handler->idiag_hashinfo;

	if (req->idiag_family == AF_INET) {
		sk = inet_lookup(hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
	}
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	else if (req->idiag_family == AF_INET6) {
		sk = inet6_lookup(hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
	}
#endif
	else {
		return -EINVAL;
	}

	if (sk == NULL)
		return -ENOENT;

	err = -ESTALE;
	if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
	     req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
	    ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
	     (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
		goto out;

	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
				     sizeof(struct inet_diag_meminfo) +
				     handler->idiag_info_size + 64)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	if (sk_diag_fill(sk, rep, req->idiag_ext,
			 NETLINK_CB(in_skb).pid,
			 nlh->nlmsg_seq, 0, nlh) <= 0)
		BUG();

	err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT)
			inet_twsk_put((struct inet_timewait_sock *)sk);
		else
			sock_put(sk);
	}
	return err;
}

static int bitstring_match(const u32 *a1, const u32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__u32 w1, w2;
		__u32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}
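/*
 * Interpreter for the INET_DIAG_REQ_BYTECODE filter that may follow a dump
 * request.  Each inet_diag_bc_op is a condition plus two jump offsets: the
 * "yes" offset is taken when the condition holds, "no" otherwise, and a
 * program that runs exactly off the end (len == 0) accepts the socket.
 */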
static int inet_diag_bc_run(const void *bc, int len,
			    const struct inet_diag_entry *entry)
{
	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			struct inet_diag_hostcond *cond;
			u32 *addr;

			cond = (struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					   entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			if (entry->family == AF_INET6 &&
			    cond->family == AF_INET) {
				if (addr[0] == 0 && addr[1] == 0 &&
				    addr[2] == htonl(0xffff) &&
				    bitstring_match(addr + 3, cond->addr,
						    cond->prefix_len))
					break;
			}
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return (len == 0);
}

static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4)
			return 0;
		len -= op->yes;
		bc += op->yes;
	}
	return 0;
}

static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const unsigned char *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;

		//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
		switch (op->code) {
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (op->yes < 4 || op->yes > len + 4)
				return -EINVAL;
			/* fall through: check the "no" offset too */
		case INET_DIAG_BC_JMP:
			if (op->no < 4 || op->no > len + 4)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
			break;
		case INET_DIAG_BC_NOP:
			if (op->yes < 4 || op->yes > len + 4)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		bc += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}
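/*
 * Illustrative only (an assumption about how a user of this interface would
 * build a filter, not taken from this file): a one-condition program that
 * accepts sockets with source port >= 1024 occupies two inet_diag_bc_op
 * slots (8 bytes), with the port value carried in the second slot's "no"
 * field:
 *
 *	struct inet_diag_bc_op prog[2] = {
 *		{ .code = INET_DIAG_BC_S_GE, .yes = 8, .no = 12 },
 *		{ .no = 1024 },
 *	};
 *
 * On a match inet_diag_bc_run() advances by ->yes (8), reaching len == 0,
 * which counts as "accept"; on a miss it advances by ->no (12), driving len
 * negative, which counts as "reject".  Userspace wraps such a program in an
 * INET_DIAG_REQ_BYTECODE rtattr following the inet_diag_req.
 */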
static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);

	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
		struct inet_diag_entry entry;
		struct rtattr *bc = (struct rtattr *)(r + 1);
		struct inet_sock *inet = inet_sk(sk);

		entry.family = sk->sk_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		if (entry.family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			entry.saddr = np->rcv_saddr.s6_addr32;
			entry.daddr = np->daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &inet->rcv_saddr;
			entry.daddr = &inet->daddr;
		}
		entry.sport = inet->num;
		entry.dport = ntohs(inet->dport);
		entry.userlocks = sk->sk_userlocks;

		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
			return 0;
	}

	return inet_csk_diag_fill(sk, skb, r->idiag_ext,
				  NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
			       struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);

	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
		struct inet_diag_entry entry;
		struct rtattr *bc = (struct rtattr *)(r + 1);

		entry.family = tw->tw_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		if (tw->tw_family == AF_INET6) {
			struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);
			entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
			entry.daddr = tw6->tw_v6_daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &tw->tw_rcv_saddr;
			entry.daddr = &tw->tw_daddr;
		}
		entry.sport = tw->tw_num;
		entry.dport = ntohs(tw->tw_dport);
		entry.userlocks = 0;

		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
			return 0;
	}

	return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
				   NETLINK_CB(cb->skb).pid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
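/*
 * Report an embryonic (SYN_RECV) connection hanging off a listening socket's
 * SYN table as if it were a socket of its own; the request_sock pointer
 * doubles as the cookie here.
 */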
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
			      struct request_sock *req, u32 pid, u32 seq,
			      const struct nlmsghdr *unlh)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *inet = inet_sk(sk);
	unsigned char *b = skb->tail;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = NLM_F_MULTI;
	r = NLMSG_DATA(nlh);

	r->idiag_family = sk->sk_family;
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = req->retrans;

	r->id.idiag_if = sk->sk_bound_dev_if;
	r->id.idiag_cookie[0] = (u32)(unsigned long)req;
	r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);

	tmo = req->expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->id.idiag_sport = inet->sport;
	r->id.idiag_dport = ireq->rmt_port;
	r->id.idiag_src[0] = ireq->loc_addr;
	r->id.idiag_dst[0] = ireq->rmt_addr;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	if (r->idiag_family == AF_INET6) {
		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
			       &inet6_rsk(req)->loc_addr);
		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
			       &inet6_rsk(req)->rmt_addr);
	}
#endif
	nlh->nlmsg_len = skb->tail - b;

	return skb->len;

nlmsg_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb)
{
	struct inet_diag_entry entry;
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	struct rtattr *bc = NULL;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !lopt->qlen)
		goto out;

	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
		bc = (struct rtattr *)(r + 1);
		entry.sport = inet->num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				entry.saddr =
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->loc_addr.s6_addr32 :
#endif
					&ireq->loc_addr;
				entry.daddr =
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->rmt_addr.s6_addr32 :
#endif
					&ireq->rmt_addr;
				entry.dport = ntohs(ireq->rmt_port);

				if (!inet_diag_bc_run(RTA_DATA(bc),
						      RTA_PAYLOAD(bc), &entry))
					continue;
			}

			err = inet_diag_fill_req(skb, sk, req,
						 NETLINK_CB(cb->skb).pid,
						 cb->nlh->nlmsg_seq, cb->nlh);
			if (err < 0) {
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}
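/*
 * Netlink dump callback.  cb->args[] carries the resume state across
 * partial dumps: [0] flags that the listening hash has been walked,
 * [1] is the hash chain index, [2] the position within the chain, and
 * [3]/[4] the position within a listener's SYN table.
 */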
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int i, num;
	int s_i, s_num;
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
	const struct inet_diag_handler *handler;
	struct inet_hashinfo *hashinfo;

	handler = inet_diag_table[cb->nlh->nlmsg_type];
	BUG_ON(handler == NULL);
	hashinfo = handler->idiag_hashinfo;

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		inet_listen_lock(hashinfo);
		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct sock *sk;
			struct hlist_node *node;

			num = 0;
			sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
				struct inet_sock *inet = inet_sk(sk);

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->id.idiag_sport != inet->sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb) < 0) {
					inet_listen_unlock(hashinfo);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
					inet_listen_unlock(hashinfo);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
		inet_listen_unlock(hashinfo);
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		return skb->len;

	for (i = s_i; i < hashinfo->ehash_size; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		struct sock *sk;
		struct hlist_node *node;

		if (i > s_i)
			s_num = 0;

		read_lock_bh(&head->lock);
		num = 0;
		sk_for_each(sk, node, &head->chain) {
			struct inet_sock *inet = inet_sk(sk);

			if (num < s_num)
				goto next_normal;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next_normal;
			if (r->id.idiag_sport != inet->sport &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != inet->dport &&
			    r->id.idiag_dport)
				goto next_normal;
			if (inet_csk_diag_dump(sk, skb, cb) < 0) {
				read_unlock_bh(&head->lock);
				goto done;
			}
next_normal:
			++num;
		}

		if (r->idiag_states & TCPF_TIME_WAIT) {
			struct inet_timewait_sock *tw;

			inet_twsk_for_each(tw, node,
				   &hashinfo->ehash[i + hashinfo->ehash_size].chain) {

				if (num < s_num)
					goto next_dying;
				if (r->id.idiag_sport != tw->tw_sport &&
				    r->id.idiag_sport)
					goto next_dying;
				if (r->id.idiag_dport != tw->tw_dport &&
				    r->id.idiag_dport)
					goto next_dying;
				if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
					read_unlock_bh(&head->lock);
					goto done;
				}
next_dying:
				++num;
			}
		}
		read_unlock_bh(&head->lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
	return skb->len;
}

static inline int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
		return 0;

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX)
		goto err_inval;

	if (inet_diag_table[nlh->nlmsg_type] == NULL)
		return -ENOENT;

	if (NLMSG_LENGTH(sizeof(struct inet_diag_req)) > skb->len)
		goto err_inval;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlh->nlmsg_len >
		    (4 + NLMSG_SPACE(sizeof(struct inet_diag_req)))) {
			struct rtattr *rta = (void *)(NLMSG_DATA(nlh) +
						sizeof(struct inet_diag_req));
			if (rta->rta_type != INET_DIAG_REQ_BYTECODE ||
			    rta->rta_len < 8 ||
			    rta->rta_len >
			    (nlh->nlmsg_len -
			     NLMSG_SPACE(sizeof(struct inet_diag_req))))
				goto err_inval;
			if (inet_diag_bc_audit(RTA_DATA(rta), RTA_PAYLOAD(rta)))
				goto err_inval;
		}
		return netlink_dump_start(idiagnl, skb, nlh,
					  inet_diag_dump, NULL);
	} else
		return inet_diag_get_exact(skb, nlh);

err_inval:
	return -EINVAL;
}

static inline void inet_diag_rcv_skb(struct sk_buff *skb)
{
	if (skb->len >= NLMSG_SPACE(0)) {
		int err;
		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;

		if (nlh->nlmsg_len < sizeof(*nlh) ||
		    skb->len < nlh->nlmsg_len)
			return;
		err = inet_diag_rcv_msg(skb, nlh);
		if (err || nlh->nlmsg_flags & NLM_F_ACK)
			netlink_ack(skb, nlh, err);
	}
}

static void inet_diag_rcv(struct sock *sk, int len)
{
	struct sk_buff *skb;
	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);

	while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) {
		inet_diag_rcv_skb(skb);
		kfree_skb(skb);
	}
}
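/*
 * Registration of per-protocol handlers (e.g. tcp_diag).  A handler owns one
 * slot in inet_diag_table[], indexed by its netlink message type; unregister
 * clears the slot and then waits out an RCU grace period so in-flight
 * lookups finish before the caller frees the handler.
 */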
static DEFINE_SPINLOCK(inet_diag_register_lock);

int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= INET_DIAG_GETSOCK_MAX)
		goto out;

	spin_lock(&inet_diag_register_lock);
	err = -EEXIST;
	if (inet_diag_table[type] == NULL) {
		inet_diag_table[type] = h;
		err = 0;
	}
	spin_unlock(&inet_diag_register_lock);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= INET_DIAG_GETSOCK_MAX)
		return;

	spin_lock(&inet_diag_register_lock);
	inet_diag_table[type] = NULL;
	spin_unlock(&inet_diag_register_lock);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kmalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	memset(inet_diag_table, 0, inet_diag_table_size);
	idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv,
					THIS_MODULE);
	if (idiagnl == NULL)
		goto out_free_table;
	err = 0;
out:
	return err;
out_free_table:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_release(idiagnl->sk_socket);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");