/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/siphash.h>

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#endif

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

#include "nf_internals.h"

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("List and change connection tracking table");
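
/* Illustrative sketch (not part of this file's build): one way userspace
 * could ask this subsystem to dump the conntrack table, using libmnl.
 * The helper name and the AF_INET choice below are assumptions made for
 * illustration only; real tools normally go through libnetfilter_conntrack
 * rather than speaking ctnetlink directly.
 */
#if 0
#include <time.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static int dump_conntrack_table(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfh;
	unsigned int seq, portid;
	int ret;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return -1;
	portid = mnl_socket_get_portid(nl);

	/* IPCTNL_MSG_CT_GET + NLM_F_DUMP is served by ctnetlink_dump_table() */
	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq = seq = time(NULL);

	nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfh));
	nfh->nfgen_family = AF_INET;	/* AF_UNSPEC would mean "all families" */
	nfh->version = NFNETLINK_V0;
	nfh->res_id = 0;

	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
		return -1;

	/* each reply carries the CTA_* attributes assembled further below */
	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		ret = mnl_cb_run(buf, ret, seq, portid, NULL, NULL);
		if (ret <= MNL_CB_STOP)
			break;
	}

	mnl_socket_close(nl);
	return ret;
}
#endif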

struct ctnetlink_list_dump_ctx {
	struct nf_conn *last;
	unsigned int cpu;
	bool done;
};

static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
				       const struct nf_conntrack_tuple *tuple,
				       const struct nf_conntrack_l4proto *l4proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
	if (!nest_parms)
		goto nla_put_failure;
	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
		goto nla_put_failure;

	if (likely(l4proto->tuple_to_nlattr))
		ret = l4proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
	    nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
		return -EMSGSIZE;
	return 0;
}

static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
	    nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
		return -EMSGSIZE;
	return 0;
}

static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
				    const struct nf_conntrack_tuple *tuple)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
	if (!nest_parms)
		goto nla_put_failure;

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		ret = ipv4_tuple_to_nlattr(skb, tuple);
		break;
	case NFPROTO_IPV6:
		ret = ipv6_tuple_to_nlattr(skb, tuple);
		break;
	}

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_tuples(struct sk_buff *skb,
				 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_l4proto *l4proto;
	int ret;

	rcu_read_lock();
	ret = ctnetlink_dump_tuples_ip(skb, tuple);

	if (ret >= 0) {
		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
	}
	rcu_read_unlock();
	return ret;
}

static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
				  const struct nf_conntrack_zone *zone, int dir)
{
	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
		return 0;
	if (nla_put_be16(skb, attrtype, htons(zone->id)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
				  bool skip_zero)
{
	long timeout;

	if (nf_ct_is_confirmed(ct))
		timeout = nf_ct_expires(ct) / HZ;
	else
		timeout = ct->timeout / HZ;

	if (skip_zero && timeout == 0)
		return 0;

	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct,
				    bool destroy)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *nest_proto;
	int ret;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (!l4proto->to_nlattr)
		return 0;

	nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
	if (!nest_proto)
		goto nla_put_failure;

	ret = l4proto->to_nlattr(skb, nest_proto, ct, destroy);

	nla_nest_end(skb, nest_proto);

	return ret;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
				   const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	rcu_read_lock();
	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP);
	if (!nest_helper)
		goto nla_put_failure;
	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
		goto nla_put_failure;

	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	rcu_read_unlock();
	return 0;

nla_put_failure:
	rcu_read_unlock();
	return -1;
}

static int
dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
	      enum ip_conntrack_dir dir, int type)
{
	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
	struct nf_conn_counter *counter = acct->counter;
	struct nlattr *nest_count;
	u64 pkts, bytes;

	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
		pkts = atomic64_xchg(&counter[dir].packets, 0);
		bytes = atomic64_xchg(&counter[dir].bytes, 0);
	} else {
		pkts = atomic64_read(&counter[dir].packets);
		bytes = atomic64_read(&counter[dir].bytes);
	}

	nest_count = nla_nest_start(skb, attr);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
			 CTA_COUNTERS_PAD) ||
	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
			 CTA_COUNTERS_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

static int
ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
{
	struct nf_conn_acct *acct = nf_conn_acct_find(ct);

	if (!acct)
		return 0;

	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
		return -1;
	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
		return -1;

	return 0;
}

static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_count;
	const struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (!tstamp)
		return 0;

	nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
			 CTA_TIMESTAMP_PAD) ||
	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
					       cpu_to_be64(tstamp->stop),
					       CTA_TIMESTAMP_PAD)))
		goto nla_put_failure;
	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

#ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct,
			       bool dump)
{
	u32 mark = READ_ONCE(ct->mark);

	if (!mark && !dump)
		return 0;

	if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
#else
#define ctnetlink_dump_mark(a, b, c) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_secctx;
	struct lsm_context ctx;
	int ret;

	ret = security_secid_to_secctx(ct->secmark, &ctx);
	if (ret < 0)
		return 0;

	ret = -1;
	nest_secctx = nla_nest_start(skb, CTA_SECCTX);
	if (!nest_secctx)
		goto nla_put_failure;

	if (nla_put_string(skb, CTA_SECCTX_NAME, ctx.context))
		goto nla_put_failure;
	nla_nest_end(skb, nest_secctx);

	ret = 0;
nla_put_failure:
	security_release_secctx(&ctx);
	return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int
ctnetlink_dump_event_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	const struct nf_conntrack_ecache *e = nf_ct_ecache_find(ct);

	if (e) {
		u64 ts = local64_read(&e->timestamp);

		if (ts)
			return nla_put_be64(skb, CTA_TIMESTAMP_EVENT,
					    cpu_to_be64(ts), CTA_TIMESTAMP_PAD);
	}
#endif
	return 0;
}

static inline int ctnetlink_label_size(const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);

	if (!labels)
		return 0;
	return nla_total_size(sizeof(labels->bits));
}
#endif

static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
	unsigned int i;

	if (!labels)
		return 0;

	i = 0;
	do {
		if (labels->bits[i] != 0)
			return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
				       labels->bits);
		i++;
	} while (i < ARRAY_SIZE(labels->bits));

	return 0;
}

#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)

static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	if (!(ct->status & IPS_EXPECTED))
		return 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int
dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
			 htonl(seq->correction_pos)) ||
	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
			 htonl(seq->offset_before)) ||
	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
			 htonl(seq->offset_after)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
	struct nf_ct_seqadj *seq;

	if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
		return 0;

	spin_lock_bh(&ct->lock);
	seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
		goto err;

	seq = &seqadj->seq[IP_CT_DIR_REPLY];
	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
		goto err;

	spin_unlock_bh(&ct->lock);
	return 0;
err:
	spin_unlock_bh(&ct->lock);
	return -1;
}

static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
	struct nlattr *nest_parms;

	if (!synproxy)
		return 0;

	nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
	    nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
	    nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	__be32 id = (__force __be32)nf_ct_get_id(ct);

	if (nla_put_be32(skb, CTA_ID, id))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
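
/* Rough shape of the per-conntrack message that the dump helpers above and
 * ctnetlink_fill_info() below assemble (illustrative, not exhaustive):
 *
 *	CTA_TUPLE_ORIG
 *	  CTA_TUPLE_IP		CTA_IP_V4_SRC/DST or CTA_IP_V6_SRC/DST
 *	  CTA_TUPLE_PROTO	CTA_PROTO_NUM plus per-l4proto attributes
 *	CTA_TUPLE_REPLY		same nesting as CTA_TUPLE_ORIG
 *	CTA_STATUS, CTA_TIMEOUT, CTA_ID, CTA_USE, CTA_MARK, ...
 *	CTA_COUNTERS_ORIG/REPLY, CTA_PROTOINFO, CTA_HELP, CTA_TUPLE_MASTER, ...
 */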

/* all these functions access ct->ext. Caller must either hold a reference
 * on ct or prevent its deletion by holding either the bucket spinlock or
 * pcpu dying list lock.
 */
static int ctnetlink_dump_extinfo(struct sk_buff *skb,
				  struct nf_conn *ct, u32 type)
{
	if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_labels(skb, ct) < 0 ||
	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
	    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
		return -1;

	return 0;
}

static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
{
	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct, true) < 0 ||
	    ctnetlink_dump_secctx(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0)
		return -1;

	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
	    (ctnetlink_dump_timeout(skb, ct, false) < 0 ||
	     ctnetlink_dump_protoinfo(skb, ct, false) < 0))
		return -1;

	return 0;
}

static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
		    struct nf_conn *ct, bool extinfo, unsigned int flags)
{
	const struct nf_conntrack_zone *zone;
	struct nlmsghdr *nlh;
	struct nlattr *nest_parms;
	unsigned int event;

	if (portid)
		flags |= NLM_F_MULTI;
	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
	nlh = nfnl_msg_put(skb, portid, seq, event, flags, nf_ct_l3num(ct),
			   NFNETLINK_V0, 0);
	if (!nlh)
		goto nlmsg_failure;

	zone = nf_ct_zone(ct);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_info(skb, ct) < 0)
		goto nla_put_failure;
	if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}

static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
	[CTA_IP_V4_SRC]	= { .type = NLA_U32 },
	[CTA_IP_V4_DST]	= { .type = NLA_U32 },
	[CTA_IP_V6_SRC]	= { .len = sizeof(__be32) * 4 },
	[CTA_IP_V6_DST]	= { .len = sizeof(__be32) * 4 },
};

#if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	size_t len, len4 = 0;

	len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
	len *= 3u; /* ORIG, REPLY, MASTER */

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	len += l4proto->nlattr_size;
	if (l4proto->nlattr_tuple_size) {
		len4 = l4proto->nlattr_tuple_size();
		len4 *= 3u; /* ORIG, REPLY, MASTER */
	}

	return len + len4;
}

static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
{
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
		return 0;
	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
	       ;
}

static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	int ret;

	ret = security_secid_to_secctx(ct->secmark, NULL);
	if (ret < 0)
		return 0;

	return nla_total_size(0) /* CTA_SECCTX */
	       + nla_total_size(sizeof(char) * ret); /* CTA_SECCTX_NAME */
#else
	return 0;
#endif
}

static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
		return 0;
	return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
#else
	return 0;
#endif
}
#endif

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + ctnetlink_acct_size(ct)
	       + ctnetlink_timestamp_size(ct)
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#if IS_ENABLED(CONFIG_NF_NAT)
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
	       + ctnetlink_proto_size(ct)
	       + ctnetlink_label_size(ct)
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	       + nla_total_size(sizeof(u64)) /* CTA_TIMESTAMP_EVENT */
#endif
	       ;
}

static int
ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
{
	const struct nf_conntrack_zone *zone;
	struct net *net;
	struct nlmsghdr *nlh;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;
	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
	nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct),
			   NFNETLINK_V0, 0);
	if (!nlh)
		goto nlmsg_failure;

	zone = nf_ct_zone(ct);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		if (ctnetlink_dump_timeout(skb, ct, true) < 0)
			goto nla_put_failure;

		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0 ||
		    ctnetlink_dump_protoinfo(skb, ct, true) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct, false) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO) &&
		    ctnetlink_dump_protoinfo(skb, ct, false) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif
		if (events & (1 << IPCT_LABEL) &&
		    ctnetlink_dump_labels(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SEQADJ) &&
		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SYNPROXY) &&
		    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
		goto nla_put_failure;
#endif

	if (ctnetlink_dump_event_timestamp(skb, ct))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->portid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */

static int ctnetlink_done(struct netlink_callback *cb)
{
	kfree(cb->data);
	return 0;
}

struct ctnetlink_filter_u32 {
	u32 val;
	u32 mask;
};

struct ctnetlink_filter {
	u8 family;
	bool zone_filter;

	u_int32_t orig_flags;
	u_int32_t reply_flags;

	struct nf_conntrack_tuple orig;
	struct nf_conntrack_tuple reply;
	struct nf_conntrack_zone zone;

	struct ctnetlink_filter_u32 mark;
	struct ctnetlink_filter_u32 status;
};

static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = {
	[CTA_FILTER_ORIG_FLAGS]		= { .type = NLA_U32 },
	[CTA_FILTER_REPLY_FLAGS]	= { .type = NLA_U32 },
};

static int ctnetlink_parse_filter(const struct nlattr *attr,
				  struct ctnetlink_filter *filter)
{
	struct nlattr *tb[CTA_FILTER_MAX + 1];
	int ret = 0;

	ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy,
			       NULL);
	if (ret)
		return ret;

	if (tb[CTA_FILTER_ORIG_FLAGS]) {
		filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]);
		if (filter->orig_flags & ~CTA_FILTER_F_ALL)
			return -EOPNOTSUPP;
	}

	if (tb[CTA_FILTER_REPLY_FLAGS]) {
		filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]);
		if (filter->reply_flags & ~CTA_FILTER_F_ALL)
			return -EOPNOTSUPP;
	}

	return 0;
}

static int ctnetlink_parse_zone(const struct nlattr *attr,
				struct nf_conntrack_zone *zone);
static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
					struct nf_conntrack_tuple *tuple,
					u32 type, u_int8_t l3num,
					struct nf_conntrack_zone *zone,
					u_int32_t flags);

static int ctnetlink_filter_parse_mark(struct ctnetlink_filter_u32 *mark,
				       const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_CONNTRACK_MARK
	if (cda[CTA_MARK]) {
		mark->val = ntohl(nla_get_be32(cda[CTA_MARK]));

		if (cda[CTA_MARK_MASK])
			mark->mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
		else
			mark->mask = 0xffffffff;
	} else if (cda[CTA_MARK_MASK]) {
		return -EINVAL;
	}
#endif
	return 0;
}

static int ctnetlink_filter_parse_status(struct ctnetlink_filter_u32 *status,
					 const struct nlattr * const cda[])
{
	if (cda[CTA_STATUS]) {
		status->val = ntohl(nla_get_be32(cda[CTA_STATUS]));
		if (cda[CTA_STATUS_MASK])
			status->mask = ntohl(nla_get_be32(cda[CTA_STATUS_MASK]));
		else
			status->mask = status->val;

		/* a zero mask makes the comparison degenerate: it matches
		 * everything if status->val is 0 and nothing otherwise.
		 */
		if (status->mask == 0)
			return -EINVAL;
	} else if (cda[CTA_STATUS_MASK]) {
		return -EINVAL;
	}

	/* CTA_STATUS is NLA_U32, if this fires UAPI needs to be extended */
	BUILD_BUG_ON(__IPS_MAX_BIT >= 32);
	return 0;
}
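
/* Both u32 filters use the same masked compare as ctnetlink_filter_match()
 * below: an entry is kept when (field & filter->mask) == filter->val.
 * For example (illustrative values only), CTA_MARK 0x1 with CTA_MARK_MASK
 * 0xff keeps entries whose low mark byte is 0x01 (0x1, 0x101, ...) and
 * skips everything else.
 */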

static struct ctnetlink_filter *
ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
{
	struct ctnetlink_filter *filter;
	int err;

#ifndef CONFIG_NF_CONNTRACK_MARK
	if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
		return ERR_PTR(-EOPNOTSUPP);
#endif

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (filter == NULL)
		return ERR_PTR(-ENOMEM);

	filter->family = family;

	err = ctnetlink_filter_parse_mark(&filter->mark, cda);
	if (err)
		goto err_filter;

	err = ctnetlink_filter_parse_status(&filter->status, cda);
	if (err)
		goto err_filter;

	if (cda[CTA_ZONE]) {
		err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
		if (err < 0)
			goto err_filter;
		filter->zone_filter = true;
	}

	if (!cda[CTA_FILTER])
		return filter;

	err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
	if (err < 0)
		goto err_filter;

	if (filter->orig_flags) {
		if (!cda[CTA_TUPLE_ORIG]) {
			err = -EINVAL;
			goto err_filter;
		}

		err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
						   CTA_TUPLE_ORIG,
						   filter->family,
						   &filter->zone,
						   filter->orig_flags);
		if (err < 0)
			goto err_filter;
	}

	if (filter->reply_flags) {
		if (!cda[CTA_TUPLE_REPLY]) {
			err = -EINVAL;
			goto err_filter;
		}

		err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
						   CTA_TUPLE_REPLY,
						   filter->family,
						   &filter->zone,
						   filter->reply_flags);
		if (err < 0)
			goto err_filter;
	}

	return filter;

err_filter:
	kfree(filter);

	return ERR_PTR(err);
}

static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
{
	return family || cda[CTA_MARK] || cda[CTA_FILTER] || cda[CTA_STATUS] || cda[CTA_ZONE];
}

static int ctnetlink_start(struct netlink_callback *cb)
{
	const struct nlattr * const *cda = cb->data;
	struct ctnetlink_filter *filter = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u8 family = nfmsg->nfgen_family;

	if (ctnetlink_needs_filter(family, cda)) {
		filter = ctnetlink_alloc_filter(cda, family);
		if (IS_ERR(filter))
			return PTR_ERR(filter);
	}

	cb->data = filter;
	return 0;
}

static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple,
					struct nf_conntrack_tuple *ct_tuple,
					u_int32_t flags, int family)
{
	switch (family) {
	case NFPROTO_IPV4:
		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
		    filter_tuple->src.u3.ip != ct_tuple->src.u3.ip)
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
		    filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip)
			return 0;
		break;
	case NFPROTO_IPV6:
		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
		    ipv6_addr_cmp(&filter_tuple->src.u3.in6,
				  &ct_tuple->src.u3.in6))
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
		    ipv6_addr_cmp(&filter_tuple->dst.u3.in6,
				  &ct_tuple->dst.u3.in6))
			return 0;
		break;
	}

	if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) &&
	    filter_tuple->dst.protonum != ct_tuple->dst.protonum)
		return 0;

	switch (ct_tuple->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) &&
		    filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port)
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) &&
		    filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port)
			return 0;
		break;
	case IPPROTO_ICMP:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) &&
		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) &&
		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) &&
		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
			return 0;
		break;
	case IPPROTO_ICMPV6:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) &&
		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) &&
		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) &&
		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
			return 0;
		break;
	}

	return 1;
}

static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
{
	struct ctnetlink_filter *filter = data;
	struct nf_conntrack_tuple *tuple;
	u32 status;

	if (filter == NULL)
		goto out;

	/* Match entries of a given L3 protocol number.
	 * If it is not specified, ie. l3proto == 0,
	 * then match everything.
	 */
	if (filter->family && nf_ct_l3num(ct) != filter->family)
		goto ignore_entry;

	if (filter->zone_filter &&
	    !nf_ct_zone_equal_any(ct, &filter->zone))
		goto ignore_entry;

	if (filter->orig_flags) {
		tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
		if (!ctnetlink_filter_match_tuple(&filter->orig, tuple,
						  filter->orig_flags,
						  filter->family))
			goto ignore_entry;
	}

	if (filter->reply_flags) {
		tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY);
		if (!ctnetlink_filter_match_tuple(&filter->reply, tuple,
						  filter->reply_flags,
						  filter->family))
			goto ignore_entry;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val)
		goto ignore_entry;
#endif
	status = (u32)READ_ONCE(ct->status);
	if ((status & filter->status.mask) != filter->status.val)
		goto ignore_entry;

out:
	return 1;

ignore_entry:
	return 0;
}

static unsigned long ctnetlink_get_id(const struct nf_conn *ct)
{
	unsigned long id = nf_ct_get_id(ct);

	return id ? id : 1;
}
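
/* The table dump keeps its position in the netlink callback: cb->args[0] is
 * the hash bucket being walked and cb->args[1], when non-zero, is the id
 * (as returned by ctnetlink_get_id()) of the last entry that did not fit
 * into the previous skb, so the next call can resume from it.  Expired
 * entries seen on the way are collected in nf_ct_evict[] and killed only
 * after the bucket lock has been dropped.
 */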
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
	struct net *net = sock_net(skb->sk);
	unsigned long last_id = cb->args[1];
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *nf_ct_evict[8];
	struct nf_conn *ct;
	int res, i;
	spinlock_t *lockp;

	i = 0;

	local_bh_disable();
	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
		while (i) {
			i--;
			if (nf_ct_should_gc(nf_ct_evict[i]))
				nf_ct_kill(nf_ct_evict[i]);
			nf_ct_put(nf_ct_evict[i]);
		}

		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
		nf_conntrack_lock(lockp);
		if (cb->args[0] >= nf_conntrack_htable_size) {
			spin_unlock(lockp);
			goto out;
		}
		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
					   hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (nf_ct_is_expired(ct)) {
				/* need to defer nf_ct_kill() until lock is released */
				if (i < ARRAY_SIZE(nf_ct_evict) &&
				    refcount_inc_not_zero(&ct->ct_general.use))
					nf_ct_evict[i++] = ct;
				continue;
			}

			if (!net_eq(net, nf_ct_net(ct)))
				continue;

			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;

			if (cb->args[1]) {
				if (ctnetlink_get_id(ct) != last_id)
					continue;
				cb->args[1] = 0;
			}
			if (!ctnetlink_filter_match(ct, cb->data))
				continue;

			res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
						  ct, true, flags);
			if (res < 0) {
				cb->args[1] = ctnetlink_get_id(ct);
				spin_unlock(lockp);
				goto out;
			}
		}
		spin_unlock(lockp);
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	local_bh_enable();
	if (last_id) {
		/* nf ct hash resize happened, now clear the leftover. */
		if (cb->args[1] == last_id)
			cb->args[1] = 0;
	}

	while (i) {
		i--;
		if (nf_ct_should_gc(nf_ct_evict[i]))
			nf_ct_kill(nf_ct_evict[i]);
		nf_ct_put(nf_ct_evict[i]);
	}

	return skb->len;
}

static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t,
				u_int32_t flags)
{
	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_IP_V4_SRC])
			return -EINVAL;

		t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
	}

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
		if (!tb[CTA_IP_V4_DST])
			return -EINVAL;

		t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
	}

	return 0;
}

static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t,
				u_int32_t flags)
{
	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_IP_V6_SRC])
			return -EINVAL;

		t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
	}

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
		if (!tb[CTA_IP_V6_DST])
			return -EINVAL;

		t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
	}

	return 0;
}

static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
				    struct nf_conntrack_tuple *tuple,
				    u_int32_t flags)
{
	struct nlattr *tb[CTA_IP_MAX+1];
	int ret = 0;

	ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr,
					  cta_ip_nla_policy, NULL);
	if (ret < 0)
		return ret;

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		ret = ipv4_nlattr_to_tuple(tb, tuple, flags);
		break;
	case NFPROTO_IPV6:
		ret = ipv6_nlattr_to_tuple(tb, tuple, flags);
		break;
	}

	return ret;
}

static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
};

static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
				       struct nf_conntrack_tuple *tuple,
				       u_int32_t flags)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *tb[CTA_PROTO_MAX+1];
	int ret = 0;

	ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
					  proto_nla_policy, NULL);
	if (ret < 0)
		return ret;

	if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)))
		return 0;

	if (!tb[CTA_PROTO_NUM])
		return -EINVAL;

	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);

	rcu_read_lock();
	l4proto = nf_ct_l4proto_find(tuple->dst.protonum);

	if (likely(l4proto->nlattr_to_tuple)) {
		ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
						     l4proto->nla_policy,
						     NULL);
		if (ret == 0)
			ret = l4proto->nlattr_to_tuple(tb, tuple, flags);
	}

	rcu_read_unlock();

	return ret;
}

static int
ctnetlink_parse_zone(const struct nlattr *attr,
		     struct nf_conntrack_zone *zone)
{
	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
			NF_CT_DEFAULT_ZONE_DIR, 0);
#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (attr)
		zone->id = ntohs(nla_get_be16(attr));
#else
	if (attr)
		return -EOPNOTSUPP;
#endif
	return 0;
}

static int
ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
			   struct nf_conntrack_zone *zone)
{
	int ret;

	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
		return -EINVAL;

	ret = ctnetlink_parse_zone(attr, zone);
	if (ret < 0)
		return ret;

	if (type == CTA_TUPLE_REPLY)
		zone->dir = NF_CT_ZONE_DIR_REPL;
	else
		zone->dir = NF_CT_ZONE_DIR_ORIG;

	return 0;
}

static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
	[CTA_TUPLE_ZONE]	= { .type = NLA_U16 },
};

#define CTA_FILTER_F_ALL_CTA_PROTO \
	(CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
	 CTA_FILTER_F_CTA_PROTO_DST_PORT | \
	 CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
	 CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
	 CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
	 CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
	 CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
	 CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)

static int
ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
			     struct nf_conntrack_tuple *tuple, u32 type,
			     u_int8_t l3num, struct nf_conntrack_zone *zone,
			     u_int32_t flags)
{
	struct nlattr *tb[CTA_TUPLE_MAX+1];
	int err;

	memset(tuple, 0, sizeof(*tuple));

	err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
					  tuple_nla_policy, NULL);
	if (err < 0)
		return err;

	if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
		return -EOPNOTSUPP;
	tuple->src.l3num = l3num;

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
	    flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_TUPLE_IP])
			return -EINVAL;

		err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags);
		if (err < 0)
			return err;
	}

	if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) {
		if (!tb[CTA_TUPLE_PROTO])
			return -EINVAL;

		err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags);
		if (err < 0)
			return err;
	} else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) {
		/* Can't manage proto flags without a protonum */
		return -EINVAL;
	}

	if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) {
		if (!zone)
			return -EINVAL;

		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
						 type, zone);
		if (err < 0)
			return err;
	}

	/* orig and expect tuples get DIR_ORIGINAL */
	if (type == CTA_TUPLE_REPLY)
		tuple->dst.dir = IP_CT_DIR_REPLY;
	else
		tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return 0;
}

static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
		      struct nf_conntrack_tuple *tuple, u32 type,
		      u_int8_t l3num, struct nf_conntrack_zone *zone)
{
	return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
					    CTA_FILTER_FLAG(ALL));
}

static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING,
				    .len = NF_CT_HELPER_NAME_LEN - 1 },
};

static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
				struct nlattr **helpinfo)
{
	int err;
	struct nlattr *tb[CTA_HELP_MAX+1];

	err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
					  help_nla_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[CTA_HELP_NAME])
		return -EINVAL;

	*helper_name = nla_data(tb[CTA_HELP_NAME]);

	if (tb[CTA_HELP_INFO])
		*helpinfo = tb[CTA_HELP_INFO];

	return 0;
}

static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
	[CTA_STATUS]		= { .type = NLA_U32 },
	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
	[CTA_HELP]		= { .type = NLA_NESTED },
	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
	[CTA_TIMEOUT]		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
	[CTA_NAT_DST]		= { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_ORIG]	= { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_REPLY]	= { .type = NLA_NESTED },
	[CTA_ZONE]		= { .type = NLA_U16 },
	[CTA_MARK_MASK]		= { .type = NLA_U32 },
	[CTA_LABELS]		= { .type = NLA_BINARY,
				    .len = NF_CT_LABELS_MAX_SIZE },
	[CTA_LABELS_MASK]	= { .type = NLA_BINARY,
				    .len = NF_CT_LABELS_MAX_SIZE },
	[CTA_FILTER]		= { .type = NLA_NESTED },
	[CTA_STATUS_MASK]	= { .type = NLA_U32 },
	[CTA_TIMESTAMP_EVENT]	= { .type = NLA_REJECT },
};

static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
{
	return ctnetlink_filter_match(ct, data);
}

static int ctnetlink_flush_conntrack(struct net *net,
				     const struct nlattr * const cda[],
				     u32 portid, int report, u8 family)
{
	struct ctnetlink_filter *filter = NULL;
	struct nf_ct_iter_data iter = {
		.net	= net,
		.portid	= portid,
		.report	= report,
	};

	if (ctnetlink_needs_filter(family, cda)) {
		filter = ctnetlink_alloc_filter(cda, family);
		if (IS_ERR(filter))
			return PTR_ERR(filter);

		iter.data = filter;
	}

	nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter);
	kfree(filter);

	return 0;
}

static int ctnetlink_del_conntrack(struct sk_buff *skb,
				   const struct nfnl_info *info,
				   const struct nlattr * const cda[])
{
	u8 family = info->nfmsg->nfgen_family;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG] && !cda[CTA_FILTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    family, &zone);
	else if (cda[CTA_TUPLE_REPLY] && !cda[CTA_FILTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    family, &zone);
	else {
		u8 u3 = info->nfmsg->version || cda[CTA_FILTER] ?
			family : AF_UNSPEC;

		return ctnetlink_flush_conntrack(info->net, cda,
						 NETLINK_CB(skb).portid,
						 nlmsg_report(info->nlh), u3);
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(info->net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_ID]) {
		__be32 id = nla_get_be32(cda[CTA_ID]);

		if (id != (__force __be32)nf_ct_get_id(ct)) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(info->nlh));
	nf_ct_put(ct);

	return 0;
}

static int ctnetlink_get_conntrack(struct sk_buff *skb,
				   const struct nfnl_info *info,
				   const struct nlattr * const cda[])
{
	u_int8_t u3 = info->nfmsg->nfgen_family;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	struct sk_buff *skb2;
	struct nf_conn *ct;
	int err;

	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.start = ctnetlink_start,
			.dump = ctnetlink_dump_table,
			.done = ctnetlink_done,
			.data = (void *)cda,
		};

		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    u3, &zone);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    u3, &zone);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(info->net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb2) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid,
				  info->nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(info->nlh->nlmsg_type), ct,
				  true, 0);
	nf_ct_put(ct);
	if (err <= 0) {
		kfree_skb(skb2);
		return -ENOMEM;
	}

	return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}

static int ctnetlink_done_list(struct netlink_callback *cb)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;

	if (ctx->last)
		nf_ct_put(ctx->last);

	return 0;
}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int ctnetlink_dump_one_entry(struct sk_buff *skb,
				    struct netlink_callback *cb,
				    struct nf_conn *ct,
				    bool dying)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u8 l3proto = nfmsg->nfgen_family;
	int res;

	if (l3proto && nf_ct_l3num(ct) != l3proto)
		return 0;

	if (ctx->last) {
		if (ct != ctx->last)
			return 0;

		ctx->last = NULL;
	}

	/* We can't dump extension info for the unconfirmed
	 * list because unconfirmed conntracks can have
	 * ct->ext reallocated (and thus freed).
	 *
	 * In the dying list case ct->ext can't be free'd
	 * until after we drop pcpu->lock.
	 */
	res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
				  ct, dying, 0);
	if (res < 0) {
		if (!refcount_inc_not_zero(&ct->ct_general.use))
			return 0;

		ctx->last = ct;
	}

	return res;
}
#endif

static int
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;
}

static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
	struct nf_conn *last = ctx->last;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	const struct net *net = sock_net(skb->sk);
	struct nf_conntrack_net_ecache *ecache_net;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
#endif

	if (ctx->done)
		return 0;

	ctx->last = NULL;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
	ecache_net = nf_conn_pernet_ecache(net);
	spin_lock_bh(&ecache_net->dying_lock);

	hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
		struct nf_conn *ct;
		int res;

		ct = nf_ct_tuplehash_to_ctrack(h);
		if (last && last != ct)
			continue;

		res = ctnetlink_dump_one_entry(skb, cb, ct, true);
		if (res < 0) {
			spin_unlock_bh(&ecache_net->dying_lock);
			nf_ct_put(last);
			return skb->len;
		}

		nf_ct_put(last);
		last = NULL;
	}

	spin_unlock_bh(&ecache_net->dying_lock);
#endif
	ctx->done = true;
	nf_ct_put(last);

	return skb->len;
}

static int ctnetlink_get_ct_dying(struct sk_buff *skb,
				  const struct nfnl_info *info,
				  const struct nlattr * const cda[])
{
	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_dying,
			.done = ctnetlink_done_list,
		};
		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	return -EOPNOTSUPP;
}

static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
					const struct nfnl_info *info,
					const struct nlattr * const cda[])
{
	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_unconfirmed,
			.done = ctnetlink_done_list,
		};
		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	return -EOPNOTSUPP;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
	__must_hold(RCU)
{
	const struct nf_nat_hook *nat_hook;
	int err;

	nat_hook = rcu_dereference(nf_nat_hook);
	if (!nat_hook) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat") < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
		nat_hook = rcu_dereference(nf_nat_hook);
		if (nat_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	err = nat_hook->parse_nat_setup(ct, manip, attr);
	if (err == -EAGAIN) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
#else
		err = -EOPNOTSUPP;
#endif
	}
	return err;
}
#endif

static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
	return nf_ct_change_status_common(ct, ntohl(nla_get_be32(cda[CTA_STATUS])));
}

static int
ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int ret;

	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
		return 0;

	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
					cda[CTA_NAT_DST]);
	if (ret < 0)
		return ret;

	return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
					 cda[CTA_NAT_SRC]);
#else
	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
		return 0;
	return -EOPNOTSUPP;
#endif
}

static int ctnetlink_change_helper(struct nf_conn *ct,
				   const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	struct nlattr *helpinfo = NULL;
	int err;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
	if (err < 0)
		return err;

	/* don't change helper of sibling connections */
	if (ct->master) {
		/* If we try to change the helper to the same thing twice,
		 * treat the second attempt as a no-op instead of returning
		 * an error.
		 */
		err = -EBUSY;
		if (help) {
			rcu_read_lock();
			helper = rcu_dereference(help->helper);
			if (helper && !strcmp(helper->name, helpname))
				err = 0;
			rcu_read_unlock();
		}

		return err;
	}

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			RCU_INIT_POINTER(help->helper, NULL);
		}

		return 0;
	}

	rcu_read_lock();
	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
		rcu_read_unlock();
		return -EOPNOTSUPP;
	}

	if (help) {
		if (rcu_access_pointer(help->helper) == helper) {
			/* update private helper data if allowed.
			 */
			if (helper->from_nlattr)
				helper->from_nlattr(helpinfo, ct);
			err = 0;
		} else
			err = -EBUSY;
	} else {
		/* we cannot set a helper for an existing conntrack */
		err = -EOPNOTSUPP;
	}

	rcu_read_unlock();
	return err;
}

static int ctnetlink_change_timeout(struct nf_conn *ct,
				    const struct nlattr * const cda[])
{
	return __nf_ct_change_timeout(ct, (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ);
}

#if defined(CONFIG_NF_CONNTRACK_MARK)
static void ctnetlink_change_mark(struct nf_conn *ct,
				  const struct nlattr * const cda[])
{
	u32 mark, newmark, mask = 0;

	if (cda[CTA_MARK_MASK])
		mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));

	mark = ntohl(nla_get_be32(cda[CTA_MARK]));
	newmark = (READ_ONCE(ct->mark) & mask) ^ mark;
	if (newmark != READ_ONCE(ct->mark))
		WRITE_ONCE(ct->mark, newmark);
}
#endif

static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
};

static int ctnetlink_change_protoinfo(struct nf_conn *ct,
				      const struct nlattr * const cda[])
{
	const struct nlattr *attr = cda[CTA_PROTOINFO];
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
	int err = 0;

	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
					  protoinfo_policy, NULL);
	if (err < 0)
		return err;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (l4proto->from_nlattr)
		err = l4proto->from_nlattr(tb, ct);

	return err;
}

static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
	[CTA_SEQADJ_CORRECTION_POS]	= { .type = NLA_U32 },
	[CTA_SEQADJ_OFFSET_BEFORE]	= { .type = NLA_U32 },
	[CTA_SEQADJ_OFFSET_AFTER]	= { .type = NLA_U32 },
};

static int change_seq_adj(struct nf_ct_seqadj *seq,
			  const struct nlattr * const attr)
{
	int err;
	struct nlattr *cda[CTA_SEQADJ_MAX+1];

	err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr,
					  seqadj_policy, NULL);
	if (err < 0)
		return err;

	if (!cda[CTA_SEQADJ_CORRECTION_POS])
		return -EINVAL;

	seq->correction_pos =
		ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));

	if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
		return -EINVAL;

	seq->offset_before =
		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));

	if (!cda[CTA_SEQADJ_OFFSET_AFTER])
		return -EINVAL;

	seq->offset_after =
		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));

	return 0;
}

static int
ctnetlink_change_seq_adj(struct nf_conn *ct,
			 const struct nlattr * const cda[])
{
	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
	int ret = 0;

	if (!seqadj)
		return 0;

	spin_lock_bh(&ct->lock);
	if (cda[CTA_SEQ_ADJ_ORIG]) {
		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
				     cda[CTA_SEQ_ADJ_ORIG]);
		if (ret < 0)
			goto err;

		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
	}

	if (cda[CTA_SEQ_ADJ_REPLY]) {
		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
				     cda[CTA_SEQ_ADJ_REPLY]);
		if (ret < 0)
			goto err;

		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
	}

	spin_unlock_bh(&ct->lock);
	return 0;
err:
spin_unlock_bh(&ct->lock); 2135 return ret; 2136 } 2137 2138 static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = { 2139 [CTA_SYNPROXY_ISN] = { .type = NLA_U32 }, 2140 [CTA_SYNPROXY_ITS] = { .type = NLA_U32 }, 2141 [CTA_SYNPROXY_TSOFF] = { .type = NLA_U32 }, 2142 }; 2143 2144 static int ctnetlink_change_synproxy(struct nf_conn *ct, 2145 const struct nlattr * const cda[]) 2146 { 2147 struct nf_conn_synproxy *synproxy = nfct_synproxy(ct); 2148 struct nlattr *tb[CTA_SYNPROXY_MAX + 1]; 2149 int err; 2150 2151 if (!synproxy) 2152 return 0; 2153 2154 err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX, 2155 cda[CTA_SYNPROXY], synproxy_policy, 2156 NULL); 2157 if (err < 0) 2158 return err; 2159 2160 if (!tb[CTA_SYNPROXY_ISN] || 2161 !tb[CTA_SYNPROXY_ITS] || 2162 !tb[CTA_SYNPROXY_TSOFF]) 2163 return -EINVAL; 2164 2165 synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN])); 2166 synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS])); 2167 synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF])); 2168 2169 return 0; 2170 } 2171 2172 static int 2173 ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[]) 2174 { 2175 #ifdef CONFIG_NF_CONNTRACK_LABELS 2176 size_t len = nla_len(cda[CTA_LABELS]); 2177 const void *mask = cda[CTA_LABELS_MASK]; 2178 2179 if (len & (sizeof(u32)-1)) /* must be multiple of u32 */ 2180 return -EINVAL; 2181 2182 if (mask) { 2183 if (nla_len(cda[CTA_LABELS_MASK]) == 0 || 2184 nla_len(cda[CTA_LABELS_MASK]) != len) 2185 return -EINVAL; 2186 mask = nla_data(cda[CTA_LABELS_MASK]); 2187 } 2188 2189 len /= sizeof(u32); 2190 2191 return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len); 2192 #else 2193 return -EOPNOTSUPP; 2194 #endif 2195 } 2196 2197 static int 2198 ctnetlink_change_conntrack(struct nf_conn *ct, 2199 const struct nlattr * const cda[]) 2200 { 2201 int err; 2202 2203 /* only allow NAT changes and master assignation for new conntracks */ 2204 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER]) 2205 return -EOPNOTSUPP; 2206 2207 if (cda[CTA_HELP]) { 2208 err = ctnetlink_change_helper(ct, cda); 2209 if (err < 0) 2210 return err; 2211 } 2212 2213 if (cda[CTA_TIMEOUT]) { 2214 err = ctnetlink_change_timeout(ct, cda); 2215 if (err < 0) 2216 return err; 2217 } 2218 2219 if (cda[CTA_STATUS]) { 2220 err = ctnetlink_change_status(ct, cda); 2221 if (err < 0) 2222 return err; 2223 } 2224 2225 if (cda[CTA_PROTOINFO]) { 2226 err = ctnetlink_change_protoinfo(ct, cda); 2227 if (err < 0) 2228 return err; 2229 } 2230 2231 #if defined(CONFIG_NF_CONNTRACK_MARK) 2232 if (cda[CTA_MARK]) 2233 ctnetlink_change_mark(ct, cda); 2234 #endif 2235 2236 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) { 2237 err = ctnetlink_change_seq_adj(ct, cda); 2238 if (err < 0) 2239 return err; 2240 } 2241 2242 if (cda[CTA_SYNPROXY]) { 2243 err = ctnetlink_change_synproxy(ct, cda); 2244 if (err < 0) 2245 return err; 2246 } 2247 2248 if (cda[CTA_LABELS]) { 2249 err = ctnetlink_attach_labels(ct, cda); 2250 if (err < 0) 2251 return err; 2252 } 2253 2254 return 0; 2255 } 2256 2257 static struct nf_conn * 2258 ctnetlink_create_conntrack(struct net *net, 2259 const struct nf_conntrack_zone *zone, 2260 const struct nlattr * const cda[], 2261 struct nf_conntrack_tuple *otuple, 2262 struct nf_conntrack_tuple *rtuple, 2263 u8 u3) 2264 { 2265 struct nf_conn *ct; 2266 int err = -EINVAL; 2267 struct nf_conntrack_helper *helper; 2268 struct nf_conn_tstamp *tstamp; 2269 u64 timeout; 2270 2271 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, 
GFP_ATOMIC); 2272 if (IS_ERR(ct)) 2273 return ERR_PTR(-ENOMEM); 2274 2275 if (!cda[CTA_TIMEOUT]) 2276 goto err1; 2277 2278 rcu_read_lock(); 2279 if (cda[CTA_HELP]) { 2280 char *helpname = NULL; 2281 struct nlattr *helpinfo = NULL; 2282 2283 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo); 2284 if (err < 0) 2285 goto err2; 2286 2287 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), 2288 nf_ct_protonum(ct)); 2289 if (helper == NULL) { 2290 rcu_read_unlock(); 2291 #ifdef CONFIG_MODULES 2292 if (request_module("nfct-helper-%s", helpname) < 0) { 2293 err = -EOPNOTSUPP; 2294 goto err1; 2295 } 2296 2297 rcu_read_lock(); 2298 helper = __nf_conntrack_helper_find(helpname, 2299 nf_ct_l3num(ct), 2300 nf_ct_protonum(ct)); 2301 if (helper) { 2302 err = -EAGAIN; 2303 goto err2; 2304 } 2305 rcu_read_unlock(); 2306 #endif 2307 err = -EOPNOTSUPP; 2308 goto err1; 2309 } else { 2310 struct nf_conn_help *help; 2311 2312 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 2313 if (help == NULL) { 2314 err = -ENOMEM; 2315 goto err2; 2316 } 2317 /* set private helper data if allowed. */ 2318 if (helper->from_nlattr) 2319 helper->from_nlattr(helpinfo, ct); 2320 2321 /* disable helper auto-assignment for this entry */ 2322 ct->status |= IPS_HELPER; 2323 RCU_INIT_POINTER(help->helper, helper); 2324 } 2325 } 2326 2327 err = ctnetlink_setup_nat(ct, cda); 2328 if (err < 0) 2329 goto err2; 2330 2331 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 2332 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); 2333 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); 2334 nf_ct_labels_ext_add(ct); 2335 nfct_seqadj_ext_add(ct); 2336 nfct_synproxy_ext_add(ct); 2337 2338 /* we must add conntrack extensions before confirmation. */ 2339 ct->status |= IPS_CONFIRMED; 2340 2341 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; 2342 __nf_ct_set_timeout(ct, timeout); 2343 2344 if (cda[CTA_STATUS]) { 2345 err = ctnetlink_change_status(ct, cda); 2346 if (err < 0) 2347 goto err2; 2348 } 2349 2350 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) { 2351 err = ctnetlink_change_seq_adj(ct, cda); 2352 if (err < 0) 2353 goto err2; 2354 } 2355 2356 memset(&ct->proto, 0, sizeof(ct->proto)); 2357 if (cda[CTA_PROTOINFO]) { 2358 err = ctnetlink_change_protoinfo(ct, cda); 2359 if (err < 0) 2360 goto err2; 2361 } 2362 2363 if (cda[CTA_SYNPROXY]) { 2364 err = ctnetlink_change_synproxy(ct, cda); 2365 if (err < 0) 2366 goto err2; 2367 } 2368 2369 #if defined(CONFIG_NF_CONNTRACK_MARK) 2370 if (cda[CTA_MARK]) 2371 ctnetlink_change_mark(ct, cda); 2372 #endif 2373 2374 /* setup master conntrack: this is a confirmed expectation */ 2375 if (cda[CTA_TUPLE_MASTER]) { 2376 struct nf_conntrack_tuple master; 2377 struct nf_conntrack_tuple_hash *master_h; 2378 struct nf_conn *master_ct; 2379 2380 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, 2381 u3, NULL); 2382 if (err < 0) 2383 goto err2; 2384 2385 master_h = nf_conntrack_find_get(net, zone, &master); 2386 if (master_h == NULL) { 2387 err = -ENOENT; 2388 goto err2; 2389 } 2390 master_ct = nf_ct_tuplehash_to_ctrack(master_h); 2391 __set_bit(IPS_EXPECTED_BIT, &ct->status); 2392 ct->master = master_ct; 2393 } 2394 tstamp = nf_conn_tstamp_find(ct); 2395 if (tstamp) 2396 tstamp->start = ktime_get_real_ns(); 2397 2398 err = nf_conntrack_hash_check_insert(ct); 2399 if (err < 0) 2400 goto err3; 2401 2402 rcu_read_unlock(); 2403 2404 return ct; 2405 2406 err3: 2407 if (ct->master) 2408 nf_ct_put(ct->master); 2409 err2: 2410 rcu_read_unlock(); 2411 err1: 2412 nf_conntrack_free(ct); 2413 return ERR_PTR(err); 
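	/* error unwind above: err3 drops the reference taken on ct->master,
	 * err2 leaves the RCU read side, err1 frees the still-unconfirmed
	 * conntrack; on success the caller holds a reference on the newly
	 * inserted entry.
	 */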
2414 } 2415 2416 static int ctnetlink_new_conntrack(struct sk_buff *skb, 2417 const struct nfnl_info *info, 2418 const struct nlattr * const cda[]) 2419 { 2420 struct nf_conntrack_tuple otuple, rtuple; 2421 struct nf_conntrack_tuple_hash *h = NULL; 2422 u_int8_t u3 = info->nfmsg->nfgen_family; 2423 struct nf_conntrack_zone zone; 2424 struct nf_conn *ct; 2425 int err; 2426 2427 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); 2428 if (err < 0) 2429 return err; 2430 2431 if (cda[CTA_TUPLE_ORIG]) { 2432 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, 2433 u3, &zone); 2434 if (err < 0) 2435 return err; 2436 } 2437 2438 if (cda[CTA_TUPLE_REPLY]) { 2439 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, 2440 u3, &zone); 2441 if (err < 0) 2442 return err; 2443 } 2444 2445 if (cda[CTA_TUPLE_ORIG]) 2446 h = nf_conntrack_find_get(info->net, &zone, &otuple); 2447 else if (cda[CTA_TUPLE_REPLY]) 2448 h = nf_conntrack_find_get(info->net, &zone, &rtuple); 2449 2450 if (h == NULL) { 2451 err = -ENOENT; 2452 if (info->nlh->nlmsg_flags & NLM_F_CREATE) { 2453 enum ip_conntrack_events events; 2454 2455 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY]) 2456 return -EINVAL; 2457 if (otuple.dst.protonum != rtuple.dst.protonum) 2458 return -EINVAL; 2459 2460 ct = ctnetlink_create_conntrack(info->net, &zone, cda, 2461 &otuple, &rtuple, u3); 2462 if (IS_ERR(ct)) 2463 return PTR_ERR(ct); 2464 2465 err = 0; 2466 if (test_bit(IPS_EXPECTED_BIT, &ct->status)) 2467 events = 1 << IPCT_RELATED; 2468 else 2469 events = 1 << IPCT_NEW; 2470 2471 if (cda[CTA_LABELS] && 2472 ctnetlink_attach_labels(ct, cda) == 0) 2473 events |= (1 << IPCT_LABEL); 2474 2475 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 2476 (1 << IPCT_ASSURED) | 2477 (1 << IPCT_HELPER) | 2478 (1 << IPCT_PROTOINFO) | 2479 (1 << IPCT_SEQADJ) | 2480 (1 << IPCT_MARK) | 2481 (1 << IPCT_SYNPROXY) | 2482 events, 2483 ct, NETLINK_CB(skb).portid, 2484 nlmsg_report(info->nlh)); 2485 nf_ct_put(ct); 2486 } 2487 2488 return err; 2489 } 2490 /* implicit 'else' */ 2491 2492 err = -EEXIST; 2493 ct = nf_ct_tuplehash_to_ctrack(h); 2494 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) { 2495 err = ctnetlink_change_conntrack(ct, cda); 2496 if (err == 0) { 2497 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 2498 (1 << IPCT_ASSURED) | 2499 (1 << IPCT_HELPER) | 2500 (1 << IPCT_LABEL) | 2501 (1 << IPCT_PROTOINFO) | 2502 (1 << IPCT_SEQADJ) | 2503 (1 << IPCT_MARK) | 2504 (1 << IPCT_SYNPROXY), 2505 ct, NETLINK_CB(skb).portid, 2506 nlmsg_report(info->nlh)); 2507 } 2508 } 2509 2510 nf_ct_put(ct); 2511 return err; 2512 } 2513 2514 static int 2515 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq, 2516 __u16 cpu, const struct ip_conntrack_stat *st) 2517 { 2518 struct nlmsghdr *nlh; 2519 unsigned int flags = portid ? 
NLM_F_MULTI : 0, event; 2520 2521 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, 2522 IPCTNL_MSG_CT_GET_STATS_CPU); 2523 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, 2524 NFNETLINK_V0, htons(cpu)); 2525 if (!nlh) 2526 goto nlmsg_failure; 2527 2528 if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) || 2529 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) || 2530 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) || 2531 nla_put_be32(skb, CTA_STATS_INSERT_FAILED, 2532 htonl(st->insert_failed)) || 2533 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) || 2534 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) || 2535 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) || 2536 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART, 2537 htonl(st->search_restart)) || 2538 nla_put_be32(skb, CTA_STATS_CLASH_RESOLVE, 2539 htonl(st->clash_resolve)) || 2540 nla_put_be32(skb, CTA_STATS_CHAIN_TOOLONG, 2541 htonl(st->chaintoolong))) 2542 goto nla_put_failure; 2543 2544 nlmsg_end(skb, nlh); 2545 return skb->len; 2546 2547 nla_put_failure: 2548 nlmsg_failure: 2549 nlmsg_cancel(skb, nlh); 2550 return -1; 2551 } 2552 2553 static int 2554 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb) 2555 { 2556 int cpu; 2557 struct net *net = sock_net(skb->sk); 2558 2559 if (cb->args[0] == nr_cpu_ids) 2560 return 0; 2561 2562 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { 2563 const struct ip_conntrack_stat *st; 2564 2565 if (!cpu_possible(cpu)) 2566 continue; 2567 2568 st = per_cpu_ptr(net->ct.stat, cpu); 2569 if (ctnetlink_ct_stat_cpu_fill_info(skb, 2570 NETLINK_CB(cb->skb).portid, 2571 cb->nlh->nlmsg_seq, 2572 cpu, st) < 0) 2573 break; 2574 } 2575 cb->args[0] = cpu; 2576 2577 return skb->len; 2578 } 2579 2580 static int ctnetlink_stat_ct_cpu(struct sk_buff *skb, 2581 const struct nfnl_info *info, 2582 const struct nlattr * const cda[]) 2583 { 2584 if (info->nlh->nlmsg_flags & NLM_F_DUMP) { 2585 struct netlink_dump_control c = { 2586 .dump = ctnetlink_ct_stat_cpu_dump, 2587 }; 2588 return netlink_dump_start(info->sk, skb, info->nlh, &c); 2589 } 2590 2591 return 0; 2592 } 2593 2594 static int 2595 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, 2596 struct net *net) 2597 { 2598 unsigned int flags = portid ? 
NLM_F_MULTI : 0, event; 2599 unsigned int nr_conntracks; 2600 struct nlmsghdr *nlh; 2601 2602 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS); 2603 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, 2604 NFNETLINK_V0, 0); 2605 if (!nlh) 2606 goto nlmsg_failure; 2607 2608 nr_conntracks = nf_conntrack_count(net); 2609 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks))) 2610 goto nla_put_failure; 2611 2612 if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max))) 2613 goto nla_put_failure; 2614 2615 nlmsg_end(skb, nlh); 2616 return skb->len; 2617 2618 nla_put_failure: 2619 nlmsg_failure: 2620 nlmsg_cancel(skb, nlh); 2621 return -1; 2622 } 2623 2624 static int ctnetlink_stat_ct(struct sk_buff *skb, const struct nfnl_info *info, 2625 const struct nlattr * const cda[]) 2626 { 2627 struct sk_buff *skb2; 2628 int err; 2629 2630 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2631 if (skb2 == NULL) 2632 return -ENOMEM; 2633 2634 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid, 2635 info->nlh->nlmsg_seq, 2636 NFNL_MSG_TYPE(info->nlh->nlmsg_type), 2637 sock_net(skb->sk)); 2638 if (err <= 0) { 2639 kfree_skb(skb2); 2640 return -ENOMEM; 2641 } 2642 2643 return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); 2644 } 2645 2646 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { 2647 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED }, 2648 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED }, 2649 [CTA_EXPECT_MASK] = { .type = NLA_NESTED }, 2650 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, 2651 [CTA_EXPECT_ID] = { .type = NLA_U32 }, 2652 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING, 2653 .len = NF_CT_HELPER_NAME_LEN - 1 }, 2654 [CTA_EXPECT_ZONE] = { .type = NLA_U16 }, 2655 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 }, 2656 [CTA_EXPECT_CLASS] = { .type = NLA_U32 }, 2657 [CTA_EXPECT_NAT] = { .type = NLA_NESTED }, 2658 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING }, 2659 }; 2660 2661 static struct nf_conntrack_expect * 2662 ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct, 2663 struct nf_conntrack_helper *helper, 2664 struct nf_conntrack_tuple *tuple, 2665 struct nf_conntrack_tuple *mask); 2666 2667 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 2668 static size_t 2669 ctnetlink_glue_build_size(const struct nf_conn *ct) 2670 { 2671 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */ 2672 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */ 2673 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */ 2674 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */ 2675 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */ 2676 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */ 2677 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */ 2678 + nla_total_size(0) /* CTA_PROTOINFO */ 2679 + nla_total_size(0) /* CTA_HELP */ 2680 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ 2681 + ctnetlink_secctx_size(ct) 2682 + ctnetlink_acct_size(ct) 2683 + ctnetlink_timestamp_size(ct) 2684 #if IS_ENABLED(CONFIG_NF_NAT) 2685 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ 2686 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */ 2687 #endif 2688 #ifdef CONFIG_NF_CONNTRACK_MARK 2689 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 2690 #endif 2691 #ifdef CONFIG_NF_CONNTRACK_ZONES 2692 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */ 2693 #endif 2694 + ctnetlink_proto_size(ct) 2695 ; 2696 } 2697 2698 static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) 
2699 { 2700 const struct nf_conntrack_zone *zone; 2701 struct nlattr *nest_parms; 2702 2703 zone = nf_ct_zone(ct); 2704 2705 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); 2706 if (!nest_parms) 2707 goto nla_put_failure; 2708 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) 2709 goto nla_put_failure; 2710 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, 2711 NF_CT_ZONE_DIR_ORIG) < 0) 2712 goto nla_put_failure; 2713 nla_nest_end(skb, nest_parms); 2714 2715 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY); 2716 if (!nest_parms) 2717 goto nla_put_failure; 2718 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) 2719 goto nla_put_failure; 2720 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, 2721 NF_CT_ZONE_DIR_REPL) < 0) 2722 goto nla_put_failure; 2723 nla_nest_end(skb, nest_parms); 2724 2725 if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone, 2726 NF_CT_DEFAULT_ZONE_DIR) < 0) 2727 goto nla_put_failure; 2728 2729 if (ctnetlink_dump_id(skb, ct) < 0) 2730 goto nla_put_failure; 2731 2732 if (ctnetlink_dump_status(skb, ct) < 0) 2733 goto nla_put_failure; 2734 2735 if (ctnetlink_dump_timeout(skb, ct, false) < 0) 2736 goto nla_put_failure; 2737 2738 if (ctnetlink_dump_protoinfo(skb, ct, false) < 0) 2739 goto nla_put_failure; 2740 2741 if (ctnetlink_dump_acct(skb, ct, IPCTNL_MSG_CT_GET) < 0 || 2742 ctnetlink_dump_timestamp(skb, ct) < 0) 2743 goto nla_put_failure; 2744 2745 if (ctnetlink_dump_helpinfo(skb, ct) < 0) 2746 goto nla_put_failure; 2747 2748 #ifdef CONFIG_NF_CONNTRACK_SECMARK 2749 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0) 2750 goto nla_put_failure; 2751 #endif 2752 if (ct->master && ctnetlink_dump_master(skb, ct) < 0) 2753 goto nla_put_failure; 2754 2755 if ((ct->status & IPS_SEQ_ADJUST) && 2756 ctnetlink_dump_ct_seq_adj(skb, ct) < 0) 2757 goto nla_put_failure; 2758 2759 if (ctnetlink_dump_ct_synproxy(skb, ct) < 0) 2760 goto nla_put_failure; 2761 2762 #ifdef CONFIG_NF_CONNTRACK_MARK 2763 if (ctnetlink_dump_mark(skb, ct, true) < 0) 2764 goto nla_put_failure; 2765 #endif 2766 if (ctnetlink_dump_labels(skb, ct) < 0) 2767 goto nla_put_failure; 2768 return 0; 2769 2770 nla_put_failure: 2771 return -ENOSPC; 2772 } 2773 2774 static int 2775 ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct, 2776 enum ip_conntrack_info ctinfo, 2777 u_int16_t ct_attr, u_int16_t ct_info_attr) 2778 { 2779 struct nlattr *nest_parms; 2780 2781 nest_parms = nla_nest_start(skb, ct_attr); 2782 if (!nest_parms) 2783 goto nla_put_failure; 2784 2785 if (__ctnetlink_glue_build(skb, ct) < 0) 2786 goto nla_put_failure; 2787 2788 nla_nest_end(skb, nest_parms); 2789 2790 if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo))) 2791 goto nla_put_failure; 2792 2793 return 0; 2794 2795 nla_put_failure: 2796 return -ENOSPC; 2797 } 2798 2799 static int 2800 ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[]) 2801 { 2802 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS])); 2803 unsigned long d = ct->status ^ status; 2804 2805 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) 2806 /* SEEN_REPLY bit can only be set */ 2807 return -EBUSY; 2808 2809 if (d & IPS_ASSURED && !(status & IPS_ASSURED)) 2810 /* ASSURED bit can only be set */ 2811 return -EBUSY; 2812 2813 /* This check is less strict than ctnetlink_change_status() 2814 * because callers often flip IPS_EXPECTED bits when sending 2815 * an NFQA_CT attribute to the kernel. So ignore the 2816 * unchangeable bits but do not error out. 
Also user programs 2817 * are allowed to clear the bits that they are allowed to change. 2818 */ 2819 __nf_ct_change_status(ct, status, ~status); 2820 return 0; 2821 } 2822 2823 static int 2824 ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct) 2825 { 2826 int err; 2827 2828 if (cda[CTA_TIMEOUT]) { 2829 err = ctnetlink_change_timeout(ct, cda); 2830 if (err < 0) 2831 return err; 2832 } 2833 if (cda[CTA_STATUS]) { 2834 err = ctnetlink_update_status(ct, cda); 2835 if (err < 0) 2836 return err; 2837 } 2838 if (cda[CTA_HELP]) { 2839 err = ctnetlink_change_helper(ct, cda); 2840 if (err < 0) 2841 return err; 2842 } 2843 if (cda[CTA_LABELS]) { 2844 err = ctnetlink_attach_labels(ct, cda); 2845 if (err < 0) 2846 return err; 2847 } 2848 #if defined(CONFIG_NF_CONNTRACK_MARK) 2849 if (cda[CTA_MARK]) { 2850 ctnetlink_change_mark(ct, cda); 2851 } 2852 #endif 2853 return 0; 2854 } 2855 2856 static int 2857 ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct) 2858 { 2859 struct nlattr *cda[CTA_MAX+1]; 2860 int ret; 2861 2862 ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy, 2863 NULL); 2864 if (ret < 0) 2865 return ret; 2866 2867 return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct); 2868 } 2869 2870 static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda, 2871 const struct nf_conn *ct, 2872 struct nf_conntrack_tuple *tuple, 2873 struct nf_conntrack_tuple *mask) 2874 { 2875 int err; 2876 2877 err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE, 2878 nf_ct_l3num(ct), NULL); 2879 if (err < 0) 2880 return err; 2881 2882 return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK, 2883 nf_ct_l3num(ct), NULL); 2884 } 2885 2886 static int 2887 ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, 2888 u32 portid, u32 report) 2889 { 2890 struct nlattr *cda[CTA_EXPECT_MAX+1]; 2891 struct nf_conntrack_tuple tuple, mask; 2892 struct nf_conntrack_helper *helper = NULL; 2893 struct nf_conntrack_expect *exp; 2894 int err; 2895 2896 err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr, 2897 exp_nla_policy, NULL); 2898 if (err < 0) 2899 return err; 2900 2901 err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda, 2902 ct, &tuple, &mask); 2903 if (err < 0) 2904 return err; 2905 2906 if (cda[CTA_EXPECT_HELP_NAME]) { 2907 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); 2908 2909 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), 2910 nf_ct_protonum(ct)); 2911 if (helper == NULL) 2912 return -EOPNOTSUPP; 2913 } 2914 2915 exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct, 2916 helper, &tuple, &mask); 2917 if (IS_ERR(exp)) 2918 return PTR_ERR(exp); 2919 2920 err = nf_ct_expect_related_report(exp, portid, report, 0); 2921 nf_ct_expect_put(exp); 2922 return err; 2923 } 2924 2925 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct, 2926 enum ip_conntrack_info ctinfo, int diff) 2927 { 2928 if (!(ct->status & IPS_NAT_MASK)) 2929 return; 2930 2931 nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff); 2932 } 2933 2934 static const struct nfnl_ct_hook ctnetlink_glue_hook = { 2935 .build_size = ctnetlink_glue_build_size, 2936 .build = ctnetlink_glue_build, 2937 .parse = ctnetlink_glue_parse, 2938 .attach_expect = ctnetlink_glue_attach_expect, 2939 .seq_adjust = ctnetlink_glue_seqadj, 2940 }; 2941 #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */ 2942 2943 /*********************************************************************** 2944 * EXPECT 2945 
***********************************************************************/ 2946 2947 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb, 2948 const struct nf_conntrack_tuple *tuple, 2949 u32 type) 2950 { 2951 struct nlattr *nest_parms; 2952 2953 nest_parms = nla_nest_start(skb, type); 2954 if (!nest_parms) 2955 goto nla_put_failure; 2956 if (ctnetlink_dump_tuples(skb, tuple) < 0) 2957 goto nla_put_failure; 2958 nla_nest_end(skb, nest_parms); 2959 2960 return 0; 2961 2962 nla_put_failure: 2963 return -1; 2964 } 2965 2966 static int ctnetlink_exp_dump_mask(struct sk_buff *skb, 2967 const struct nf_conntrack_tuple *tuple, 2968 const struct nf_conntrack_tuple_mask *mask) 2969 { 2970 const struct nf_conntrack_l4proto *l4proto; 2971 struct nf_conntrack_tuple m; 2972 struct nlattr *nest_parms; 2973 int ret; 2974 2975 memset(&m, 0xFF, sizeof(m)); 2976 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3)); 2977 m.src.u.all = mask->src.u.all; 2978 m.src.l3num = tuple->src.l3num; 2979 m.dst.protonum = tuple->dst.protonum; 2980 2981 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK); 2982 if (!nest_parms) 2983 goto nla_put_failure; 2984 2985 rcu_read_lock(); 2986 ret = ctnetlink_dump_tuples_ip(skb, &m); 2987 if (ret >= 0) { 2988 l4proto = nf_ct_l4proto_find(tuple->dst.protonum); 2989 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); 2990 } 2991 rcu_read_unlock(); 2992 2993 if (unlikely(ret < 0)) 2994 goto nla_put_failure; 2995 2996 nla_nest_end(skb, nest_parms); 2997 2998 return 0; 2999 3000 nla_put_failure: 3001 return -1; 3002 } 3003 3004 #if IS_ENABLED(CONFIG_NF_NAT) 3005 static const union nf_inet_addr any_addr; 3006 #endif 3007 3008 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp) 3009 { 3010 static siphash_aligned_key_t exp_id_seed; 3011 unsigned long a, b, c, d; 3012 3013 net_get_random_once(&exp_id_seed, sizeof(exp_id_seed)); 3014 3015 a = (unsigned long)exp; 3016 b = (unsigned long)exp->helper; 3017 c = (unsigned long)exp->master; 3018 d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed); 3019 3020 #ifdef CONFIG_64BIT 3021 return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed); 3022 #else 3023 return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed); 3024 #endif 3025 } 3026 3027 static int 3028 ctnetlink_exp_dump_expect(struct sk_buff *skb, 3029 const struct nf_conntrack_expect *exp) 3030 { 3031 struct nf_conn *master = exp->master; 3032 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ; 3033 struct nf_conn_help *help; 3034 #if IS_ENABLED(CONFIG_NF_NAT) 3035 struct nlattr *nest_parms; 3036 struct nf_conntrack_tuple nat_tuple = {}; 3037 #endif 3038 struct nf_ct_helper_expectfn *expfn; 3039 3040 if (timeout < 0) 3041 timeout = 0; 3042 3043 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0) 3044 goto nla_put_failure; 3045 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0) 3046 goto nla_put_failure; 3047 if (ctnetlink_exp_dump_tuple(skb, 3048 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 3049 CTA_EXPECT_MASTER) < 0) 3050 goto nla_put_failure; 3051 3052 #if IS_ENABLED(CONFIG_NF_NAT) 3053 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) || 3054 exp->saved_proto.all) { 3055 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT); 3056 if (!nest_parms) 3057 goto nla_put_failure; 3058 3059 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir))) 3060 goto nla_put_failure; 3061 3062 nat_tuple.src.l3num = nf_ct_l3num(master); 3063 nat_tuple.src.u3 = exp->saved_addr; 
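		/* complete the pre-NAT tuple: the protocol number comes from
		 * the master conntrack, the port/id part from the saved proto
		 * data; it is dumped below as CTA_EXPECT_NAT_TUPLE.
		 */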
3064 nat_tuple.dst.protonum = nf_ct_protonum(master); 3065 nat_tuple.src.u = exp->saved_proto; 3066 3067 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple, 3068 CTA_EXPECT_NAT_TUPLE) < 0) 3069 goto nla_put_failure; 3070 nla_nest_end(skb, nest_parms); 3071 } 3072 #endif 3073 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) || 3074 nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) || 3075 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) || 3076 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class))) 3077 goto nla_put_failure; 3078 help = nfct_help(master); 3079 if (help) { 3080 struct nf_conntrack_helper *helper; 3081 3082 helper = rcu_dereference(help->helper); 3083 if (helper && 3084 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name)) 3085 goto nla_put_failure; 3086 } 3087 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn); 3088 if (expfn != NULL && 3089 nla_put_string(skb, CTA_EXPECT_FN, expfn->name)) 3090 goto nla_put_failure; 3091 3092 return 0; 3093 3094 nla_put_failure: 3095 return -1; 3096 } 3097 3098 static int 3099 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq, 3100 int event, const struct nf_conntrack_expect *exp) 3101 { 3102 struct nlmsghdr *nlh; 3103 unsigned int flags = portid ? NLM_F_MULTI : 0; 3104 3105 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event); 3106 nlh = nfnl_msg_put(skb, portid, seq, event, flags, 3107 exp->tuple.src.l3num, NFNETLINK_V0, 0); 3108 if (!nlh) 3109 goto nlmsg_failure; 3110 3111 if (ctnetlink_exp_dump_expect(skb, exp) < 0) 3112 goto nla_put_failure; 3113 3114 nlmsg_end(skb, nlh); 3115 return skb->len; 3116 3117 nlmsg_failure: 3118 nla_put_failure: 3119 nlmsg_cancel(skb, nlh); 3120 return -1; 3121 } 3122 3123 #ifdef CONFIG_NF_CONNTRACK_EVENTS 3124 static int 3125 ctnetlink_expect_event(unsigned int events, const struct nf_exp_event *item) 3126 { 3127 struct nf_conntrack_expect *exp = item->exp; 3128 struct net *net = nf_ct_exp_net(exp); 3129 struct nlmsghdr *nlh; 3130 struct sk_buff *skb; 3131 unsigned int type, group; 3132 int flags = 0; 3133 3134 if (events & (1 << IPEXP_DESTROY)) { 3135 type = IPCTNL_MSG_EXP_DELETE; 3136 group = NFNLGRP_CONNTRACK_EXP_DESTROY; 3137 } else if (events & (1 << IPEXP_NEW)) { 3138 type = IPCTNL_MSG_EXP_NEW; 3139 flags = NLM_F_CREATE|NLM_F_EXCL; 3140 group = NFNLGRP_CONNTRACK_EXP_NEW; 3141 } else 3142 return 0; 3143 3144 if (!item->report && !nfnetlink_has_listeners(net, group)) 3145 return 0; 3146 3147 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 3148 if (skb == NULL) 3149 goto errout; 3150 3151 type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type); 3152 nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, 3153 exp->tuple.src.l3num, NFNETLINK_V0, 0); 3154 if (!nlh) 3155 goto nlmsg_failure; 3156 3157 if (ctnetlink_exp_dump_expect(skb, exp) < 0) 3158 goto nla_put_failure; 3159 3160 nlmsg_end(skb, nlh); 3161 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC); 3162 return 0; 3163 3164 nla_put_failure: 3165 nlmsg_cancel(skb, nlh); 3166 nlmsg_failure: 3167 kfree_skb(skb); 3168 errout: 3169 nfnetlink_set_err(net, 0, 0, -ENOBUFS); 3170 return 0; 3171 } 3172 #endif 3173 3174 static unsigned long ctnetlink_exp_id(const struct nf_conntrack_expect *exp) 3175 { 3176 unsigned long id = (unsigned long)exp; 3177 3178 id += nf_ct_get_id(exp->master); 3179 id += exp->class; 3180 3181 return id ? 
id : 1; 3182 } 3183 3184 static int 3185 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 3186 { 3187 struct net *net = sock_net(skb->sk); 3188 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3189 u_int8_t l3proto = nfmsg->nfgen_family; 3190 unsigned long last_id = cb->args[1]; 3191 struct nf_conntrack_expect *exp; 3192 3193 rcu_read_lock(); 3194 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { 3195 restart: 3196 hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]], 3197 hnode) { 3198 if (l3proto && exp->tuple.src.l3num != l3proto) 3199 continue; 3200 3201 if (!net_eq(nf_ct_net(exp->master), net)) 3202 continue; 3203 3204 if (cb->args[1]) { 3205 if (ctnetlink_exp_id(exp) != last_id) 3206 continue; 3207 cb->args[1] = 0; 3208 } 3209 if (ctnetlink_exp_fill_info(skb, 3210 NETLINK_CB(cb->skb).portid, 3211 cb->nlh->nlmsg_seq, 3212 IPCTNL_MSG_EXP_NEW, 3213 exp) < 0) { 3214 cb->args[1] = ctnetlink_exp_id(exp); 3215 goto out; 3216 } 3217 } 3218 if (cb->args[1]) { 3219 cb->args[1] = 0; 3220 goto restart; 3221 } 3222 } 3223 out: 3224 rcu_read_unlock(); 3225 return skb->len; 3226 } 3227 3228 static int 3229 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 3230 { 3231 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3232 struct nf_conn *ct = cb->data; 3233 struct nf_conn_help *help = nfct_help(ct); 3234 u_int8_t l3proto = nfmsg->nfgen_family; 3235 unsigned long last_id = cb->args[1]; 3236 struct nf_conntrack_expect *exp; 3237 3238 if (cb->args[0]) 3239 return 0; 3240 3241 rcu_read_lock(); 3242 3243 restart: 3244 hlist_for_each_entry_rcu(exp, &help->expectations, lnode) { 3245 if (l3proto && exp->tuple.src.l3num != l3proto) 3246 continue; 3247 if (cb->args[1]) { 3248 if (ctnetlink_exp_id(exp) != last_id) 3249 continue; 3250 cb->args[1] = 0; 3251 } 3252 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid, 3253 cb->nlh->nlmsg_seq, 3254 IPCTNL_MSG_EXP_NEW, 3255 exp) < 0) { 3256 cb->args[1] = ctnetlink_exp_id(exp); 3257 goto out; 3258 } 3259 } 3260 if (cb->args[1]) { 3261 cb->args[1] = 0; 3262 goto restart; 3263 } 3264 cb->args[0] = 1; 3265 out: 3266 rcu_read_unlock(); 3267 return skb->len; 3268 } 3269 3270 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl, 3271 struct sk_buff *skb, 3272 const struct nlmsghdr *nlh, 3273 const struct nlattr * const cda[], 3274 struct netlink_ext_ack *extack) 3275 { 3276 int err; 3277 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 3278 u_int8_t u3 = nfmsg->nfgen_family; 3279 struct nf_conntrack_tuple tuple; 3280 struct nf_conntrack_tuple_hash *h; 3281 struct nf_conn *ct; 3282 struct nf_conntrack_zone zone; 3283 struct netlink_dump_control c = { 3284 .dump = ctnetlink_exp_ct_dump_table, 3285 }; 3286 3287 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, 3288 u3, NULL); 3289 if (err < 0) 3290 return err; 3291 3292 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3293 if (err < 0) 3294 return err; 3295 3296 h = nf_conntrack_find_get(net, &zone, &tuple); 3297 if (!h) 3298 return -ENOENT; 3299 3300 ct = nf_ct_tuplehash_to_ctrack(h); 3301 /* No expectation linked to this connection tracking. 
*/ 3302 if (!nfct_help(ct)) { 3303 nf_ct_put(ct); 3304 return 0; 3305 } 3306 3307 c.data = ct; 3308 3309 err = netlink_dump_start(ctnl, skb, nlh, &c); 3310 nf_ct_put(ct); 3311 3312 return err; 3313 } 3314 3315 static int ctnetlink_get_expect(struct sk_buff *skb, 3316 const struct nfnl_info *info, 3317 const struct nlattr * const cda[]) 3318 { 3319 u_int8_t u3 = info->nfmsg->nfgen_family; 3320 struct nf_conntrack_tuple tuple; 3321 struct nf_conntrack_expect *exp; 3322 struct nf_conntrack_zone zone; 3323 struct sk_buff *skb2; 3324 int err; 3325 3326 if (info->nlh->nlmsg_flags & NLM_F_DUMP) { 3327 if (cda[CTA_EXPECT_MASTER]) 3328 return ctnetlink_dump_exp_ct(info->net, info->sk, skb, 3329 info->nlh, cda, 3330 info->extack); 3331 else { 3332 struct netlink_dump_control c = { 3333 .dump = ctnetlink_exp_dump_table, 3334 }; 3335 return netlink_dump_start(info->sk, skb, info->nlh, &c); 3336 } 3337 } 3338 3339 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3340 if (err < 0) 3341 return err; 3342 3343 if (cda[CTA_EXPECT_TUPLE]) 3344 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3345 u3, NULL); 3346 else if (cda[CTA_EXPECT_MASTER]) 3347 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, 3348 u3, NULL); 3349 else 3350 return -EINVAL; 3351 3352 if (err < 0) 3353 return err; 3354 3355 exp = nf_ct_expect_find_get(info->net, &zone, &tuple); 3356 if (!exp) 3357 return -ENOENT; 3358 3359 if (cda[CTA_EXPECT_ID]) { 3360 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); 3361 3362 if (id != nf_expect_get_id(exp)) { 3363 nf_ct_expect_put(exp); 3364 return -ENOENT; 3365 } 3366 } 3367 3368 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 3369 if (!skb2) { 3370 nf_ct_expect_put(exp); 3371 return -ENOMEM; 3372 } 3373 3374 rcu_read_lock(); 3375 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid, 3376 info->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, 3377 exp); 3378 rcu_read_unlock(); 3379 nf_ct_expect_put(exp); 3380 if (err <= 0) { 3381 kfree_skb(skb2); 3382 return -ENOMEM; 3383 } 3384 3385 return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); 3386 } 3387 3388 static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data) 3389 { 3390 struct nf_conntrack_helper *helper; 3391 const struct nf_conn_help *m_help; 3392 const char *name = data; 3393 3394 m_help = nfct_help(exp->master); 3395 3396 helper = rcu_dereference(m_help->helper); 3397 if (!helper) 3398 return false; 3399 3400 return strcmp(helper->name, name) == 0; 3401 } 3402 3403 static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data) 3404 { 3405 return true; 3406 } 3407 3408 static int ctnetlink_del_expect(struct sk_buff *skb, 3409 const struct nfnl_info *info, 3410 const struct nlattr * const cda[]) 3411 { 3412 u_int8_t u3 = info->nfmsg->nfgen_family; 3413 struct nf_conntrack_expect *exp; 3414 struct nf_conntrack_tuple tuple; 3415 struct nf_conntrack_zone zone; 3416 int err; 3417 3418 if (cda[CTA_EXPECT_TUPLE]) { 3419 /* delete a single expect by tuple */ 3420 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3421 if (err < 0) 3422 return err; 3423 3424 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3425 u3, NULL); 3426 if (err < 0) 3427 return err; 3428 3429 /* bump usage count to 2 */ 3430 exp = nf_ct_expect_find_get(info->net, &zone, &tuple); 3431 if (!exp) 3432 return -ENOENT; 3433 3434 if (cda[CTA_EXPECT_ID]) { 3435 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); 3436 3437 if (id != nf_expect_get_id(exp)) { 3438 nf_ct_expect_put(exp); 3439 return -ENOENT; 3440 } 3441 } 
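		/* tuple (and, if given, id) matched: remove the expectation.
		 * If timer_delete() wins the race against expiry, unlinking and
		 * dropping the list reference happens here; otherwise the
		 * timeout handler has already taken care of it.
		 */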
3442 3443 /* after list removal, usage count == 1 */ 3444 spin_lock_bh(&nf_conntrack_expect_lock); 3445 if (timer_delete(&exp->timeout)) { 3446 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid, 3447 nlmsg_report(info->nlh)); 3448 nf_ct_expect_put(exp); 3449 } 3450 spin_unlock_bh(&nf_conntrack_expect_lock); 3451 /* have to put what we 'get' above. 3452 * after this line usage count == 0 */ 3453 nf_ct_expect_put(exp); 3454 } else if (cda[CTA_EXPECT_HELP_NAME]) { 3455 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]); 3456 3457 nf_ct_expect_iterate_net(info->net, expect_iter_name, name, 3458 NETLINK_CB(skb).portid, 3459 nlmsg_report(info->nlh)); 3460 } else { 3461 /* This basically means we have to flush everything*/ 3462 nf_ct_expect_iterate_net(info->net, expect_iter_all, NULL, 3463 NETLINK_CB(skb).portid, 3464 nlmsg_report(info->nlh)); 3465 } 3466 3467 return 0; 3468 } 3469 static int 3470 ctnetlink_change_expect(struct nf_conntrack_expect *x, 3471 const struct nlattr * const cda[]) 3472 { 3473 if (cda[CTA_EXPECT_TIMEOUT]) { 3474 if (!timer_delete(&x->timeout)) 3475 return -ETIME; 3476 3477 x->timeout.expires = jiffies + 3478 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; 3479 add_timer(&x->timeout); 3480 } 3481 return 0; 3482 } 3483 3484 #if IS_ENABLED(CONFIG_NF_NAT) 3485 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = { 3486 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 }, 3487 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED }, 3488 }; 3489 #endif 3490 3491 static int 3492 ctnetlink_parse_expect_nat(const struct nlattr *attr, 3493 struct nf_conntrack_expect *exp, 3494 u_int8_t u3) 3495 { 3496 #if IS_ENABLED(CONFIG_NF_NAT) 3497 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1]; 3498 struct nf_conntrack_tuple nat_tuple = {}; 3499 int err; 3500 3501 err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr, 3502 exp_nat_nla_policy, NULL); 3503 if (err < 0) 3504 return err; 3505 3506 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE]) 3507 return -EINVAL; 3508 3509 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb, 3510 &nat_tuple, CTA_EXPECT_NAT_TUPLE, 3511 u3, NULL); 3512 if (err < 0) 3513 return err; 3514 3515 exp->saved_addr = nat_tuple.src.u3; 3516 exp->saved_proto = nat_tuple.src.u; 3517 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR])); 3518 3519 return 0; 3520 #else 3521 return -EOPNOTSUPP; 3522 #endif 3523 } 3524 3525 static struct nf_conntrack_expect * 3526 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct, 3527 struct nf_conntrack_helper *helper, 3528 struct nf_conntrack_tuple *tuple, 3529 struct nf_conntrack_tuple *mask) 3530 { 3531 u_int32_t class = 0; 3532 struct nf_conntrack_expect *exp; 3533 struct nf_conn_help *help; 3534 int err; 3535 3536 help = nfct_help(ct); 3537 if (!help) 3538 return ERR_PTR(-EOPNOTSUPP); 3539 3540 if (cda[CTA_EXPECT_CLASS] && helper) { 3541 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS])); 3542 if (class > helper->expect_class_max) 3543 return ERR_PTR(-EINVAL); 3544 } 3545 exp = nf_ct_expect_alloc(ct); 3546 if (!exp) 3547 return ERR_PTR(-ENOMEM); 3548 3549 if (cda[CTA_EXPECT_FLAGS]) { 3550 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS])); 3551 exp->flags &= ~NF_CT_EXPECT_USERSPACE; 3552 } else { 3553 exp->flags = 0; 3554 } 3555 if (cda[CTA_EXPECT_FN]) { 3556 const char *name = nla_data(cda[CTA_EXPECT_FN]); 3557 struct nf_ct_helper_expectfn *expfn; 3558 3559 expfn = nf_ct_helper_expectfn_find_by_name(name); 3560 if (expfn == NULL) { 3561 err = -EINVAL; 3562 goto err_out; 
3563 } 3564 exp->expectfn = expfn->expectfn; 3565 } else 3566 exp->expectfn = NULL; 3567 3568 exp->class = class; 3569 exp->master = ct; 3570 exp->helper = helper; 3571 exp->tuple = *tuple; 3572 exp->mask.src.u3 = mask->src.u3; 3573 exp->mask.src.u.all = mask->src.u.all; 3574 3575 if (cda[CTA_EXPECT_NAT]) { 3576 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT], 3577 exp, nf_ct_l3num(ct)); 3578 if (err < 0) 3579 goto err_out; 3580 } 3581 return exp; 3582 err_out: 3583 nf_ct_expect_put(exp); 3584 return ERR_PTR(err); 3585 } 3586 3587 static int 3588 ctnetlink_create_expect(struct net *net, 3589 const struct nf_conntrack_zone *zone, 3590 const struct nlattr * const cda[], 3591 u_int8_t u3, u32 portid, int report) 3592 { 3593 struct nf_conntrack_tuple tuple, mask, master_tuple; 3594 struct nf_conntrack_tuple_hash *h = NULL; 3595 struct nf_conntrack_helper *helper = NULL; 3596 struct nf_conntrack_expect *exp; 3597 struct nf_conn *ct; 3598 int err; 3599 3600 /* caller guarantees that those three CTA_EXPECT_* exist */ 3601 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3602 u3, NULL); 3603 if (err < 0) 3604 return err; 3605 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, 3606 u3, NULL); 3607 if (err < 0) 3608 return err; 3609 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, 3610 u3, NULL); 3611 if (err < 0) 3612 return err; 3613 3614 /* Look for master conntrack of this expectation */ 3615 h = nf_conntrack_find_get(net, zone, &master_tuple); 3616 if (!h) 3617 return -ENOENT; 3618 ct = nf_ct_tuplehash_to_ctrack(h); 3619 3620 rcu_read_lock(); 3621 if (cda[CTA_EXPECT_HELP_NAME]) { 3622 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); 3623 3624 helper = __nf_conntrack_helper_find(helpname, u3, 3625 nf_ct_protonum(ct)); 3626 if (helper == NULL) { 3627 rcu_read_unlock(); 3628 #ifdef CONFIG_MODULES 3629 if (request_module("nfct-helper-%s", helpname) < 0) { 3630 err = -EOPNOTSUPP; 3631 goto err_ct; 3632 } 3633 rcu_read_lock(); 3634 helper = __nf_conntrack_helper_find(helpname, u3, 3635 nf_ct_protonum(ct)); 3636 if (helper) { 3637 err = -EAGAIN; 3638 goto err_rcu; 3639 } 3640 rcu_read_unlock(); 3641 #endif 3642 err = -EOPNOTSUPP; 3643 goto err_ct; 3644 } 3645 } 3646 3647 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); 3648 if (IS_ERR(exp)) { 3649 err = PTR_ERR(exp); 3650 goto err_rcu; 3651 } 3652 3653 err = nf_ct_expect_related_report(exp, portid, report, 0); 3654 nf_ct_expect_put(exp); 3655 err_rcu: 3656 rcu_read_unlock(); 3657 err_ct: 3658 nf_ct_put(ct); 3659 return err; 3660 } 3661 3662 static int ctnetlink_new_expect(struct sk_buff *skb, 3663 const struct nfnl_info *info, 3664 const struct nlattr * const cda[]) 3665 { 3666 u_int8_t u3 = info->nfmsg->nfgen_family; 3667 struct nf_conntrack_tuple tuple; 3668 struct nf_conntrack_expect *exp; 3669 struct nf_conntrack_zone zone; 3670 int err; 3671 3672 if (!cda[CTA_EXPECT_TUPLE] 3673 || !cda[CTA_EXPECT_MASK] 3674 || !cda[CTA_EXPECT_MASTER]) 3675 return -EINVAL; 3676 3677 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3678 if (err < 0) 3679 return err; 3680 3681 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3682 u3, NULL); 3683 if (err < 0) 3684 return err; 3685 3686 spin_lock_bh(&nf_conntrack_expect_lock); 3687 exp = __nf_ct_expect_find(info->net, &zone, &tuple); 3688 if (!exp) { 3689 spin_unlock_bh(&nf_conntrack_expect_lock); 3690 err = -ENOENT; 3691 if (info->nlh->nlmsg_flags & NLM_F_CREATE) { 3692 err = ctnetlink_create_expect(info->net, &zone, cda, u3, 3693 
NETLINK_CB(skb).portid, 3694 nlmsg_report(info->nlh)); 3695 } 3696 return err; 3697 } 3698 3699 err = -EEXIST; 3700 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) 3701 err = ctnetlink_change_expect(exp, cda); 3702 spin_unlock_bh(&nf_conntrack_expect_lock); 3703 3704 return err; 3705 } 3706 3707 static int 3708 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu, 3709 const struct ip_conntrack_stat *st) 3710 { 3711 struct nlmsghdr *nlh; 3712 unsigned int flags = portid ? NLM_F_MULTI : 0, event; 3713 3714 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, 3715 IPCTNL_MSG_EXP_GET_STATS_CPU); 3716 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, 3717 NFNETLINK_V0, htons(cpu)); 3718 if (!nlh) 3719 goto nlmsg_failure; 3720 3721 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) || 3722 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) || 3723 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete))) 3724 goto nla_put_failure; 3725 3726 nlmsg_end(skb, nlh); 3727 return skb->len; 3728 3729 nla_put_failure: 3730 nlmsg_failure: 3731 nlmsg_cancel(skb, nlh); 3732 return -1; 3733 } 3734 3735 static int 3736 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb) 3737 { 3738 int cpu; 3739 struct net *net = sock_net(skb->sk); 3740 3741 if (cb->args[0] == nr_cpu_ids) 3742 return 0; 3743 3744 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { 3745 const struct ip_conntrack_stat *st; 3746 3747 if (!cpu_possible(cpu)) 3748 continue; 3749 3750 st = per_cpu_ptr(net->ct.stat, cpu); 3751 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid, 3752 cb->nlh->nlmsg_seq, 3753 cpu, st) < 0) 3754 break; 3755 } 3756 cb->args[0] = cpu; 3757 3758 return skb->len; 3759 } 3760 3761 static int ctnetlink_stat_exp_cpu(struct sk_buff *skb, 3762 const struct nfnl_info *info, 3763 const struct nlattr * const cda[]) 3764 { 3765 if (info->nlh->nlmsg_flags & NLM_F_DUMP) { 3766 struct netlink_dump_control c = { 3767 .dump = ctnetlink_exp_stat_cpu_dump, 3768 }; 3769 return netlink_dump_start(info->sk, skb, info->nlh, &c); 3770 } 3771 3772 return 0; 3773 } 3774 3775 #ifdef CONFIG_NF_CONNTRACK_EVENTS 3776 static struct nf_ct_event_notifier ctnl_notifier = { 3777 .ct_event = ctnetlink_conntrack_event, 3778 .exp_event = ctnetlink_expect_event, 3779 }; 3780 #endif 3781 3782 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { 3783 [IPCTNL_MSG_CT_NEW] = { 3784 .call = ctnetlink_new_conntrack, 3785 .type = NFNL_CB_MUTEX, 3786 .attr_count = CTA_MAX, 3787 .policy = ct_nla_policy 3788 }, 3789 [IPCTNL_MSG_CT_GET] = { 3790 .call = ctnetlink_get_conntrack, 3791 .type = NFNL_CB_MUTEX, 3792 .attr_count = CTA_MAX, 3793 .policy = ct_nla_policy 3794 }, 3795 [IPCTNL_MSG_CT_DELETE] = { 3796 .call = ctnetlink_del_conntrack, 3797 .type = NFNL_CB_MUTEX, 3798 .attr_count = CTA_MAX, 3799 .policy = ct_nla_policy 3800 }, 3801 [IPCTNL_MSG_CT_GET_CTRZERO] = { 3802 .call = ctnetlink_get_conntrack, 3803 .type = NFNL_CB_MUTEX, 3804 .attr_count = CTA_MAX, 3805 .policy = ct_nla_policy 3806 }, 3807 [IPCTNL_MSG_CT_GET_STATS_CPU] = { 3808 .call = ctnetlink_stat_ct_cpu, 3809 .type = NFNL_CB_MUTEX, 3810 }, 3811 [IPCTNL_MSG_CT_GET_STATS] = { 3812 .call = ctnetlink_stat_ct, 3813 .type = NFNL_CB_MUTEX, 3814 }, 3815 [IPCTNL_MSG_CT_GET_DYING] = { 3816 .call = ctnetlink_get_ct_dying, 3817 .type = NFNL_CB_MUTEX, 3818 }, 3819 [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { 3820 .call = ctnetlink_get_ct_unconfirmed, 3821 .type = NFNL_CB_MUTEX, 3822 }, 3823 }; 3824 3825 static const 
struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { 3826 [IPCTNL_MSG_EXP_GET] = { 3827 .call = ctnetlink_get_expect, 3828 .type = NFNL_CB_MUTEX, 3829 .attr_count = CTA_EXPECT_MAX, 3830 .policy = exp_nla_policy 3831 }, 3832 [IPCTNL_MSG_EXP_NEW] = { 3833 .call = ctnetlink_new_expect, 3834 .type = NFNL_CB_MUTEX, 3835 .attr_count = CTA_EXPECT_MAX, 3836 .policy = exp_nla_policy 3837 }, 3838 [IPCTNL_MSG_EXP_DELETE] = { 3839 .call = ctnetlink_del_expect, 3840 .type = NFNL_CB_MUTEX, 3841 .attr_count = CTA_EXPECT_MAX, 3842 .policy = exp_nla_policy 3843 }, 3844 [IPCTNL_MSG_EXP_GET_STATS_CPU] = { 3845 .call = ctnetlink_stat_exp_cpu, 3846 .type = NFNL_CB_MUTEX, 3847 }, 3848 }; 3849 3850 static const struct nfnetlink_subsystem ctnl_subsys = { 3851 .name = "conntrack", 3852 .subsys_id = NFNL_SUBSYS_CTNETLINK, 3853 .cb_count = IPCTNL_MSG_MAX, 3854 .cb = ctnl_cb, 3855 }; 3856 3857 static const struct nfnetlink_subsystem ctnl_exp_subsys = { 3858 .name = "conntrack_expect", 3859 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP, 3860 .cb_count = IPCTNL_MSG_EXP_MAX, 3861 .cb = ctnl_exp_cb, 3862 }; 3863 3864 MODULE_ALIAS("ip_conntrack_netlink"); 3865 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 3866 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); 3867 3868 static int __net_init ctnetlink_net_init(struct net *net) 3869 { 3870 #ifdef CONFIG_NF_CONNTRACK_EVENTS 3871 nf_conntrack_register_notifier(net, &ctnl_notifier); 3872 #endif 3873 return 0; 3874 } 3875 3876 static void ctnetlink_net_pre_exit(struct net *net) 3877 { 3878 #ifdef CONFIG_NF_CONNTRACK_EVENTS 3879 nf_conntrack_unregister_notifier(net); 3880 #endif 3881 } 3882 3883 static struct pernet_operations ctnetlink_net_ops = { 3884 .init = ctnetlink_net_init, 3885 .pre_exit = ctnetlink_net_pre_exit, 3886 }; 3887 3888 static int __init ctnetlink_init(void) 3889 { 3890 int ret; 3891 3892 NL_ASSERT_CTX_FITS(struct ctnetlink_list_dump_ctx); 3893 3894 ret = nfnetlink_subsys_register(&ctnl_subsys); 3895 if (ret < 0) { 3896 pr_err("ctnetlink_init: cannot register with nfnetlink.\n"); 3897 goto err_out; 3898 } 3899 3900 ret = nfnetlink_subsys_register(&ctnl_exp_subsys); 3901 if (ret < 0) { 3902 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n"); 3903 goto err_unreg_subsys; 3904 } 3905 3906 ret = register_pernet_subsys(&ctnetlink_net_ops); 3907 if (ret < 0) { 3908 pr_err("ctnetlink_init: cannot register pernet operations\n"); 3909 goto err_unreg_exp_subsys; 3910 } 3911 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 3912 /* setup interaction between nf_queue and nf_conntrack_netlink. */ 3913 RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook); 3914 #endif 3915 return 0; 3916 3917 err_unreg_exp_subsys: 3918 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 3919 err_unreg_subsys: 3920 nfnetlink_subsys_unregister(&ctnl_subsys); 3921 err_out: 3922 return ret; 3923 } 3924 3925 static void __exit ctnetlink_exit(void) 3926 { 3927 unregister_pernet_subsys(&ctnetlink_net_ops); 3928 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 3929 nfnetlink_subsys_unregister(&ctnl_subsys); 3930 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 3931 RCU_INIT_POINTER(nfnl_ct_hook, NULL); 3932 #endif 3933 synchronize_rcu(); 3934 } 3935 3936 module_init(ctnetlink_init); 3937 module_exit(ctnetlink_exit); 3938
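
/*
 * Illustrative only, not part of this module: a minimal userspace sketch
 * (assuming libmnl is available) of the request that ctnl_cb[IPCTNL_MSG_CT_GET]
 * dispatches above -- an NFNL_SUBSYS_CTNETLINK / IPCTNL_MSG_CT_GET dump.
 * Attribute parsing of each reply is omitted and error handling is minimal;
 * the block is compiled out and only sketches the message layout: the
 * nfnetlink subsystem id sits in the high byte of nlmsg_type and struct
 * nfgenmsg is the fixed header, as expected by the callbacks in this file.
 */
#if 0	/* userspace sketch, never built as part of the kernel */
#include <stdio.h>
#include <time.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

/* count one conntrack entry per reply message */
static int ct_cb(const struct nlmsghdr *nlh, void *data)
{
	(*(unsigned int *)data)++;
	return MNL_CB_OK;
}

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	unsigned int seq = time(NULL), portid, count = 0;
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfh;
	int ret;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return 1;
	portid = mnl_socket_get_portid(nl);

	/* IPCTNL_MSG_CT_GET with NLM_F_DUMP walks the whole table */
	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq = seq;

	nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfh));
	nfh->nfgen_family = AF_UNSPEC;	/* all address families */
	nfh->version = NFNETLINK_V0;
	nfh->res_id = 0;

	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
		return 1;

	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		ret = mnl_cb_run(buf, ret, seq, portid, ct_cb, &count);
		if (ret <= MNL_CB_STOP)
			break;
	}

	printf("%u conntrack entries\n", count);
	mnl_socket_close(nl);
	return 0;
}
#endif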