/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/siphash.h>

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#endif

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

#include "nf_internals.h"

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("List and change connection tracking table");

struct ctnetlink_list_dump_ctx {
	unsigned long last_id;
	unsigned int cpu;
	bool done;
};

static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
				       const struct nf_conntrack_tuple *tuple,
				       const struct nf_conntrack_l4proto *l4proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
	if (!nest_parms)
		goto nla_put_failure;
	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
		goto nla_put_failure;

	if (likely(l4proto->tuple_to_nlattr))
		ret = l4proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
	    nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
		return -EMSGSIZE;
	return 0;
}

static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
	    nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
		return -EMSGSIZE;
	return 0;
}

static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
				    const struct nf_conntrack_tuple *tuple)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
	if (!nest_parms)
		goto nla_put_failure;

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		ret = ipv4_tuple_to_nlattr(skb, tuple);
		break;
	case NFPROTO_IPV6:
		ret = ipv6_tuple_to_nlattr(skb, tuple);
		break;
	}

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_tuples(struct sk_buff *skb,
				 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_l4proto *l4proto;
	int ret;

	rcu_read_lock();
	ret = ctnetlink_dump_tuples_ip(skb, tuple);

	if (ret >= 0) {
		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
	}
	rcu_read_unlock();
	return ret;
}

static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
				  const struct nf_conntrack_zone *zone, int dir)
{
	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
		return 0;
	if (nla_put_be16(skb, attrtype, htons(zone->id)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
				  bool skip_zero)
{
	long timeout;

	if (nf_ct_is_confirmed(ct))
		timeout = nf_ct_expires(ct) / HZ;
	else
		timeout = ct->timeout / HZ;

	if (skip_zero && timeout == 0)
		return 0;

	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct,
				    bool destroy)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *nest_proto;
	int ret;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (!l4proto->to_nlattr)
		return 0;

	nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
	if (!nest_proto)
		goto nla_put_failure;

	ret = l4proto->to_nlattr(skb, nest_proto, ct, destroy);

	nla_nest_end(skb, nest_proto);

	return ret;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
				   const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	rcu_read_lock();
	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP);
	if (!nest_helper)
		goto nla_put_failure;
	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
		goto nla_put_failure;

	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	rcu_read_unlock();
	return 0;

nla_put_failure:
	rcu_read_unlock();
	return -1;
}

static int
dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
	      enum ip_conntrack_dir dir, int type)
{
	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
	struct nf_conn_counter *counter = acct->counter;
	struct nlattr *nest_count;
	u64 pkts, bytes;

	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
		pkts = atomic64_xchg(&counter[dir].packets, 0);
		bytes = atomic64_xchg(&counter[dir].bytes, 0);
	} else {
		pkts = atomic64_read(&counter[dir].packets);
		bytes = atomic64_read(&counter[dir].bytes);
	}

	nest_count = nla_nest_start(skb, attr);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
			 CTA_COUNTERS_PAD) ||
	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
			 CTA_COUNTERS_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

static int
ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
{
	struct nf_conn_acct *acct = nf_conn_acct_find(ct);

	if (!acct)
		return 0;

	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
		return -1;
	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
		return -1;

	return 0;
}

static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_count;
	const struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (!tstamp)
		return 0;

	nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
			 CTA_TIMESTAMP_PAD) ||
	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
					       cpu_to_be64(tstamp->stop),
					       CTA_TIMESTAMP_PAD)))
		goto nla_put_failure;
	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

#ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct,
			       bool dump)
{
	u32 mark = READ_ONCE(ct->mark);

	if (!mark && !dump)
		return 0;

	if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
#else
#define ctnetlink_dump_mark(a, b, c) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_secctx;
	struct lsm_context ctx;
	int ret;

	ret = security_secid_to_secctx(ct->secmark, &ctx);
	if (ret < 0)
		return 0;

	ret = -1;
	nest_secctx = nla_nest_start(skb, CTA_SECCTX);
	if (!nest_secctx)
		goto nla_put_failure;

	if (nla_put_string(skb, CTA_SECCTX_NAME, ctx.context))
		goto nla_put_failure;
	nla_nest_end(skb, nest_secctx);

	ret = 0;
nla_put_failure:
	security_release_secctx(&ctx);
	return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int
ctnetlink_dump_event_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	const struct nf_conntrack_ecache *e = nf_ct_ecache_find(ct);

	if (e) {
		u64 ts = local64_read(&e->timestamp);

		if (ts)
			return nla_put_be64(skb, CTA_TIMESTAMP_EVENT,
					    cpu_to_be64(ts), CTA_TIMESTAMP_PAD);
	}
#endif
	return 0;
}

static inline int ctnetlink_label_size(const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);

	if (!labels)
		return 0;
	return nla_total_size(sizeof(labels->bits));
}
#endif

static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
	unsigned int i;

	if (!labels)
		return 0;

	i = 0;
	do {
		if (labels->bits[i] != 0)
			return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
				       labels->bits);
		i++;
	} while (i < ARRAY_SIZE(labels->bits));

	return 0;
}

#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)

static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	if (!(ct->status & IPS_EXPECTED))
		return 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int
dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
			 htonl(seq->correction_pos)) ||
	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
			 htonl(seq->offset_before)) ||
	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
			 htonl(seq->offset_after)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
	struct nf_ct_seqadj *seq;

	if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
		return 0;

	spin_lock_bh(&ct->lock);
	seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
		goto err;

	seq = &seqadj->seq[IP_CT_DIR_REPLY];
	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
		goto err;

	spin_unlock_bh(&ct->lock);
	return 0;
err:
	spin_unlock_bh(&ct->lock);
	return -1;
}

static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
	struct nlattr *nest_parms;

	if (!synproxy)
		return 0;

	nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
	    nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
	    nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	__be32 id = (__force __be32)nf_ct_get_id(ct);

	if (nla_put_be32(skb, CTA_ID, id))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
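
/* The helpers above each emit a single CTA_* attribute or nest; in outline,
 * ctnetlink_fill_info() below strings them together into a message of
 * roughly this shape:
 *
 *	CTA_TUPLE_ORIG
 *	  CTA_TUPLE_IP:    CTA_IP_V4_SRC/DST or CTA_IP_V6_SRC/DST
 *	  CTA_TUPLE_PROTO: CTA_PROTO_NUM plus l4proto-specific attributes
 *	CTA_TUPLE_REPLY    (same layout as CTA_TUPLE_ORIG)
 *	CTA_STATUS, CTA_TIMEOUT, CTA_ID, CTA_USE, CTA_MARK, ...
 *
 * Each helper returns 0 on success and -1 once the skb runs out of room,
 * which callers turn into a truncated dump that is resumed later.
 */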

/* all these functions access ct->ext. Caller must either hold a reference
 * on ct or prevent its deletion by holding either the bucket spinlock or
 * pcpu dying list lock.
 */
static int ctnetlink_dump_extinfo(struct sk_buff *skb,
				  struct nf_conn *ct, u32 type)
{
	if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_labels(skb, ct) < 0 ||
	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
	    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
		return -1;

	return 0;
}

static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
{
	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct, true) < 0 ||
	    ctnetlink_dump_secctx(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0)
		return -1;

	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
	    (ctnetlink_dump_timeout(skb, ct, false) < 0 ||
	     ctnetlink_dump_protoinfo(skb, ct, false) < 0))
		return -1;

	return 0;
}

static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
		    struct nf_conn *ct, bool extinfo, unsigned int flags)
{
	const struct nf_conntrack_zone *zone;
	struct nlmsghdr *nlh;
	struct nlattr *nest_parms;
	unsigned int event;

	if (portid)
		flags |= NLM_F_MULTI;
	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
	nlh = nfnl_msg_put(skb, portid, seq, event, flags, nf_ct_l3num(ct),
			   NFNETLINK_V0, 0);
	if (!nlh)
		goto nlmsg_failure;

	zone = nf_ct_zone(ct);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_info(skb, ct) < 0)
		goto nla_put_failure;
	if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}

static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
	[CTA_IP_V4_SRC]	= { .type = NLA_U32 },
	[CTA_IP_V4_DST]	= { .type = NLA_U32 },
	[CTA_IP_V6_SRC]	= { .len = sizeof(__be32) * 4 },
	[CTA_IP_V6_DST]	= { .len = sizeof(__be32) * 4 },
};

#if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	size_t len, len4 = 0;

	len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
	len *= 3u; /* ORIG, REPLY, MASTER */

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	len += l4proto->nlattr_size;
	if (l4proto->nlattr_tuple_size) {
		len4 = l4proto->nlattr_tuple_size();
		len4 *= 3u; /* ORIG, REPLY, MASTER */
	}

	return len + len4;
}

static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
{
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
		return 0;
	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
	       ;
}

static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	int ret;

	ret = security_secid_to_secctx(ct->secmark, NULL);
	if (ret < 0)
		return 0;

	return nla_total_size(0) /* CTA_SECCTX */
	       + nla_total_size(sizeof(char) * ret); /* CTA_SECCTX_NAME */
#else
	return 0;
#endif
}

static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
		return 0;
	return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
#else
	return 0;
#endif
}
#endif

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + ctnetlink_acct_size(ct)
	       + ctnetlink_timestamp_size(ct)
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#if IS_ENABLED(CONFIG_NF_NAT)
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
	       + ctnetlink_proto_size(ct)
	       + ctnetlink_label_size(ct)
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	       + nla_total_size(sizeof(u64)) /* CTA_TIMESTAMP_EVENT */
#endif
	       ;
}

static int
ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
{
	const struct nf_conntrack_zone *zone;
	struct net *net;
	struct nlmsghdr *nlh;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
	nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct),
			   NFNETLINK_V0, 0);
	if (!nlh)
		goto nlmsg_failure;

	zone = nf_ct_zone(ct);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		if (ctnetlink_dump_timeout(skb, ct, true) < 0)
			goto nla_put_failure;

		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0 ||
		    ctnetlink_dump_protoinfo(skb, ct, true) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct, false) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO) &&
		    ctnetlink_dump_protoinfo(skb, ct, false) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif
		if (events & (1 << IPCT_LABEL) &&
		    ctnetlink_dump_labels(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SEQADJ) &&
		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SYNPROXY) &&
		    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
		goto nla_put_failure;
#endif

	if (ctnetlink_dump_event_timestamp(skb, ct))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->portid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */

static int ctnetlink_done(struct netlink_callback *cb)
{
	kfree(cb->data);
	return 0;
}

struct ctnetlink_filter_u32 {
	u32 val;
	u32 mask;
};

struct ctnetlink_filter {
	u8 family;
	bool zone_filter;

	u_int32_t orig_flags;
	u_int32_t reply_flags;

	struct nf_conntrack_tuple orig;
	struct nf_conntrack_tuple reply;
	struct nf_conntrack_zone zone;

	struct ctnetlink_filter_u32 mark;
	struct ctnetlink_filter_u32 status;
};

static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = {
	[CTA_FILTER_ORIG_FLAGS]		= { .type = NLA_U32 },
	[CTA_FILTER_REPLY_FLAGS]	= { .type = NLA_U32 },
};

static int ctnetlink_parse_filter(const struct nlattr *attr,
				  struct ctnetlink_filter *filter)
{
	struct nlattr *tb[CTA_FILTER_MAX + 1];
	int ret = 0;

	ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy,
			       NULL);
	if (ret)
		return ret;

	if (tb[CTA_FILTER_ORIG_FLAGS]) {
		filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]);
		if (filter->orig_flags & ~CTA_FILTER_F_ALL)
			return -EOPNOTSUPP;
	}

	if (tb[CTA_FILTER_REPLY_FLAGS]) {
		filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]);
		if (filter->reply_flags & ~CTA_FILTER_F_ALL)
			return -EOPNOTSUPP;
	}

	return 0;
}

static int ctnetlink_parse_zone(const struct nlattr *attr,
				struct nf_conntrack_zone *zone);
static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
					struct nf_conntrack_tuple *tuple,
					u32 type, u_int8_t l3num,
					struct nf_conntrack_zone *zone,
					u_int32_t flags);

static int ctnetlink_filter_parse_mark(struct ctnetlink_filter_u32 *mark,
				       const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_CONNTRACK_MARK
	if (cda[CTA_MARK]) {
		mark->val = ntohl(nla_get_be32(cda[CTA_MARK]));

		if (cda[CTA_MARK_MASK])
			mark->mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
		else
			mark->mask = 0xffffffff;
	} else if (cda[CTA_MARK_MASK]) {
		return -EINVAL;
	}
#endif
	return 0;
}

static int ctnetlink_filter_parse_status(struct ctnetlink_filter_u32 *status,
					 const struct nlattr * const cda[])
{
	if (cda[CTA_STATUS]) {
		status->val = ntohl(nla_get_be32(cda[CTA_STATUS]));
		if (cda[CTA_STATUS_MASK])
			status->mask = ntohl(nla_get_be32(cda[CTA_STATUS_MASK]));
		else
			status->mask = status->val;

		/* status->val == 0? always true, else always false. */
		if (status->mask == 0)
			return -EINVAL;
	} else if (cda[CTA_STATUS_MASK]) {
		return -EINVAL;
	}

	/* CTA_STATUS is NLA_U32, if this fires UAPI needs to be extended */
	BUILD_BUG_ON(__IPS_MAX_BIT >= 32);
	return 0;
}

static struct ctnetlink_filter *
ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
{
	struct ctnetlink_filter *filter;
	int err;

#ifndef CONFIG_NF_CONNTRACK_MARK
	if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
		return ERR_PTR(-EOPNOTSUPP);
#endif

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (filter == NULL)
		return ERR_PTR(-ENOMEM);

	filter->family = family;

	err = ctnetlink_filter_parse_mark(&filter->mark, cda);
	if (err)
		goto err_filter;

	err = ctnetlink_filter_parse_status(&filter->status, cda);
	if (err)
		goto err_filter;

	if (cda[CTA_ZONE]) {
		err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
		if (err < 0)
			goto err_filter;
		filter->zone_filter = true;
	}

	if (!cda[CTA_FILTER])
		return filter;

	err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
	if (err < 0)
		goto err_filter;

	if (filter->orig_flags) {
		if (!cda[CTA_TUPLE_ORIG]) {
			err = -EINVAL;
			goto err_filter;
		}

		err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
						   CTA_TUPLE_ORIG,
						   filter->family,
						   &filter->zone,
						   filter->orig_flags);
		if (err < 0)
			goto err_filter;
	}

	if (filter->reply_flags) {
		if (!cda[CTA_TUPLE_REPLY]) {
			err = -EINVAL;
			goto err_filter;
		}

		err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
						   CTA_TUPLE_REPLY,
						   filter->family,
						   &filter->zone,
						   filter->reply_flags);
		if (err < 0)
			goto err_filter;
	}

	return filter;

err_filter:
	kfree(filter);

	return ERR_PTR(err);
}

static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
{
	return family || cda[CTA_MARK] || cda[CTA_FILTER] || cda[CTA_STATUS] || cda[CTA_ZONE];
}

static int ctnetlink_start(struct netlink_callback *cb)
{
	const struct nlattr * const *cda = cb->data;
	struct ctnetlink_filter *filter = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u8 family = nfmsg->nfgen_family;

	if (ctnetlink_needs_filter(family, cda)) {
		filter = ctnetlink_alloc_filter(cda, family);
		if (IS_ERR(filter))
			return PTR_ERR(filter);
	}

	cb->data = filter;
	return 0;
}

static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple,
					struct nf_conntrack_tuple *ct_tuple,
					u_int32_t flags, int family)
{
	switch (family) {
	case NFPROTO_IPV4:
		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
		    filter_tuple->src.u3.ip != ct_tuple->src.u3.ip)
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
		    filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip)
			return 0;
		break;
	case NFPROTO_IPV6:
		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
		    !ipv6_addr_cmp(&filter_tuple->src.u3.in6,
				   &ct_tuple->src.u3.in6))
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
		    !ipv6_addr_cmp(&filter_tuple->dst.u3.in6,
				   &ct_tuple->dst.u3.in6))
			return 0;
		break;
	}

	if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) &&
	    filter_tuple->dst.protonum != ct_tuple->dst.protonum)
		return 0;

	switch (ct_tuple->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) &&
		    filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port)
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) &&
		    filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port)
			return 0;
		break;
	case IPPROTO_ICMP:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) &&
		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) &&
		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) &&
		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
			return 0;
		break;
	case IPPROTO_ICMPV6:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) &&
		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) &&
		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) &&
		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
			return 0;
		break;
	}

	return 1;
}

static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
{
	struct ctnetlink_filter *filter = data;
	struct nf_conntrack_tuple *tuple;
	u32 status;

	if (filter == NULL)
		goto out;

	/* Match entries of a given L3 protocol number.
	 * If it is not specified, ie. l3proto == 0,
	 * then match everything.
	 */
	if (filter->family && nf_ct_l3num(ct) != filter->family)
		goto ignore_entry;

	if (filter->zone_filter &&
	    !nf_ct_zone_equal_any(ct, &filter->zone))
		goto ignore_entry;

	if (filter->orig_flags) {
		tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
		if (!ctnetlink_filter_match_tuple(&filter->orig, tuple,
						  filter->orig_flags,
						  filter->family))
			goto ignore_entry;
	}

	if (filter->reply_flags) {
		tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY);
		if (!ctnetlink_filter_match_tuple(&filter->reply, tuple,
						  filter->reply_flags,
						  filter->family))
			goto ignore_entry;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val)
		goto ignore_entry;
#endif
	status = (u32)READ_ONCE(ct->status);
	if ((status & filter->status.mask) != filter->status.val)
		goto ignore_entry;

out:
	return 1;

ignore_entry:
	return 0;
}

static unsigned long ctnetlink_get_id(const struct nf_conn *ct)
{
	unsigned long id = nf_ct_get_id(ct);

	return id ? id : 1;
}

static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
	struct net *net = sock_net(skb->sk);
	unsigned long last_id = cb->args[1];
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *nf_ct_evict[8];
	struct nf_conn *ct;
	int res, i;
	spinlock_t *lockp;

	i = 0;

	local_bh_disable();
	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
		while (i) {
			i--;
			if (nf_ct_should_gc(nf_ct_evict[i]))
				nf_ct_kill(nf_ct_evict[i]);
			nf_ct_put(nf_ct_evict[i]);
		}

		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
		nf_conntrack_lock(lockp);
		if (cb->args[0] >= nf_conntrack_htable_size) {
			spin_unlock(lockp);
			goto out;
		}
		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
					   hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (nf_ct_is_expired(ct)) {
				/* need to defer nf_ct_kill() until lock is released */
				if (i < ARRAY_SIZE(nf_ct_evict) &&
				    refcount_inc_not_zero(&ct->ct_general.use))
					nf_ct_evict[i++] = ct;
				continue;
			}

			if (!net_eq(net, nf_ct_net(ct)))
				continue;

			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;

			if (cb->args[1]) {
				if (ctnetlink_get_id(ct) != last_id)
					continue;
				cb->args[1] = 0;
			}
			if (!ctnetlink_filter_match(ct, cb->data))
				continue;

			res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
						  ct, true, flags);
			if (res < 0) {
				cb->args[1] = ctnetlink_get_id(ct);
				spin_unlock(lockp);
				goto out;
			}
		}
		spin_unlock(lockp);
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	local_bh_enable();
	if (last_id) {
		/* nf ct hash resize happened, now clear the leftover. */
		if (cb->args[1] == last_id)
			cb->args[1] = 0;
	}

	while (i) {
		i--;
		if (nf_ct_should_gc(nf_ct_evict[i]))
			nf_ct_kill(nf_ct_evict[i]);
		nf_ct_put(nf_ct_evict[i]);
	}

	return skb->len;
}

static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t,
				u_int32_t flags)
{
	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_IP_V4_SRC])
			return -EINVAL;

		t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
	}

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
		if (!tb[CTA_IP_V4_DST])
			return -EINVAL;

		t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
	}

	return 0;
}

static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t,
				u_int32_t flags)
{
	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_IP_V6_SRC])
			return -EINVAL;

		t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
	}

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
		if (!tb[CTA_IP_V6_DST])
			return -EINVAL;

		t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
	}

	return 0;
}

static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
				    struct nf_conntrack_tuple *tuple,
				    u_int32_t flags)
{
	struct nlattr *tb[CTA_IP_MAX+1];
	int ret = 0;

	ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr,
					  cta_ip_nla_policy, NULL);
	if (ret < 0)
		return ret;

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		ret = ipv4_nlattr_to_tuple(tb, tuple, flags);
		break;
	case NFPROTO_IPV6:
		ret = ipv6_nlattr_to_tuple(tb, tuple, flags);
		break;
	}

	return ret;
}

static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
};

static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
				       struct nf_conntrack_tuple *tuple,
				       u_int32_t flags)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *tb[CTA_PROTO_MAX+1];
	int ret = 0;

	ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
					  proto_nla_policy, NULL);
	if (ret < 0)
		return ret;

	if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)))
		return 0;

	if (!tb[CTA_PROTO_NUM])
		return -EINVAL;

	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);

	rcu_read_lock();
	l4proto = nf_ct_l4proto_find(tuple->dst.protonum);

	if (likely(l4proto->nlattr_to_tuple)) {
		ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
						     l4proto->nla_policy,
						     NULL);
		if (ret == 0)
			ret = l4proto->nlattr_to_tuple(tb, tuple, flags);
	}

	rcu_read_unlock();

	return ret;
}

static int
ctnetlink_parse_zone(const struct nlattr *attr,
		     struct nf_conntrack_zone *zone)
{
	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
			NF_CT_DEFAULT_ZONE_DIR, 0);
#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (attr)
		zone->id = ntohs(nla_get_be16(attr));
#else
	if (attr)
		return -EOPNOTSUPP;
#endif
	return 0;
}

static int
ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
			   struct nf_conntrack_zone *zone)
{
	int ret;

	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
		return -EINVAL;

	ret = ctnetlink_parse_zone(attr, zone);
	if (ret < 0)
		return ret;

	if (type == CTA_TUPLE_REPLY)
		zone->dir = NF_CT_ZONE_DIR_REPL;
	else
		zone->dir = NF_CT_ZONE_DIR_ORIG;

	return 0;
}

static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
	[CTA_TUPLE_ZONE]	= { .type = NLA_U16 },
};

#define CTA_FILTER_F_ALL_CTA_PROTO \
	(CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
	 CTA_FILTER_F_CTA_PROTO_DST_PORT | \
	 CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
	 CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
	 CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
	 CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
	 CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
	 CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)

static int
ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
			     struct nf_conntrack_tuple *tuple, u32 type,
			     u_int8_t l3num, struct nf_conntrack_zone *zone,
			     u_int32_t flags)
{
	struct nlattr *tb[CTA_TUPLE_MAX+1];
	int err;

	memset(tuple, 0, sizeof(*tuple));

	err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
					  tuple_nla_policy, NULL);
	if (err < 0)
		return err;

	if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
		return -EOPNOTSUPP;
	tuple->src.l3num = l3num;

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
	    flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_TUPLE_IP])
			return -EINVAL;

		err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags);
		if (err < 0)
			return err;
	}

	if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) {
		if (!tb[CTA_TUPLE_PROTO])
			return -EINVAL;

		err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags);
		if (err < 0)
			return err;
	} else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) {
		/* Can't manage proto flags without a protonum */
		return -EINVAL;
	}

	if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) {
		if (!zone)
			return -EINVAL;

		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
						 type, zone);
		if (err < 0)
			return err;
	}

	/* orig and expect tuples get DIR_ORIGINAL */
	if (type == CTA_TUPLE_REPLY)
		tuple->dst.dir = IP_CT_DIR_REPLY;
	else
		tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return 0;
}

static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
		      struct nf_conntrack_tuple *tuple, u32 type,
		      u_int8_t l3num, struct nf_conntrack_zone *zone)
{
	return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
					    CTA_FILTER_FLAG(ALL));
}

static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING,
				    .len = NF_CT_HELPER_NAME_LEN - 1 },
};

static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
				struct nlattr **helpinfo)
{
	int err;
	struct nlattr *tb[CTA_HELP_MAX+1];

	err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
					  help_nla_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[CTA_HELP_NAME])
		return -EINVAL;

	*helper_name = nla_data(tb[CTA_HELP_NAME]);

	if (tb[CTA_HELP_INFO])
		*helpinfo = tb[CTA_HELP_INFO];

	return 0;
}

static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
	[CTA_STATUS]		= { .type = NLA_U32 },
	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
	[CTA_HELP]		= { .type = NLA_NESTED },
	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
	[CTA_TIMEOUT]		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
	[CTA_NAT_DST]		= { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_ORIG]	= { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_REPLY]	= { .type = NLA_NESTED },
	[CTA_ZONE]		= { .type = NLA_U16 },
	[CTA_MARK_MASK]		= { .type = NLA_U32 },
	[CTA_LABELS]		= { .type = NLA_BINARY,
				    .len = NF_CT_LABELS_MAX_SIZE },
	[CTA_LABELS_MASK]	= { .type = NLA_BINARY,
				    .len = NF_CT_LABELS_MAX_SIZE },
	[CTA_FILTER]		= { .type = NLA_NESTED },
	[CTA_STATUS_MASK]	= { .type = NLA_U32 },
	[CTA_TIMESTAMP_EVENT]	= { .type = NLA_REJECT },
};

static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
{
	return ctnetlink_filter_match(ct, data);
}

static int ctnetlink_flush_conntrack(struct net *net,
				     const struct nlattr * const cda[],
				     u32 portid, int report, u8 family)
{
	struct ctnetlink_filter *filter = NULL;
	struct nf_ct_iter_data iter = {
		.net	= net,
		.portid	= portid,
		.report	= report,
	};

	if (ctnetlink_needs_filter(family, cda)) {
		filter = ctnetlink_alloc_filter(cda, family);
		if (IS_ERR(filter))
			return PTR_ERR(filter);

		iter.data = filter;
	}

	nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter);
	kfree(filter);

	return 0;
}

static int ctnetlink_del_conntrack(struct sk_buff *skb,
				   const struct nfnl_info *info,
				   const struct nlattr * const cda[])
{
	u8 family = info->nfmsg->nfgen_family;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG] && !cda[CTA_FILTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    family, &zone);
	else if (cda[CTA_TUPLE_REPLY] && !cda[CTA_FILTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    family, &zone);
	else {
		u8 u3 = info->nfmsg->version || cda[CTA_FILTER] ?
			family : AF_UNSPEC;

		return ctnetlink_flush_conntrack(info->net, cda,
						 NETLINK_CB(skb).portid,
						 nlmsg_report(info->nlh), u3);
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(info->net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_ID]) {
		__be32 id = nla_get_be32(cda[CTA_ID]);

		if (id != (__force __be32)nf_ct_get_id(ct)) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(info->nlh));
	nf_ct_put(ct);

	return 0;
}

static int ctnetlink_get_conntrack(struct sk_buff *skb,
				   const struct nfnl_info *info,
				   const struct nlattr * const cda[])
{
	u_int8_t u3 = info->nfmsg->nfgen_family;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	struct sk_buff *skb2;
	struct nf_conn *ct;
	int err;

	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.start = ctnetlink_start,
			.dump = ctnetlink_dump_table,
			.done = ctnetlink_done,
			.data = (void *)cda,
		};

		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    u3, &zone);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    u3, &zone);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(info->net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb2) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid,
				  info->nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(info->nlh->nlmsg_type), ct,
				  true, 0);
	nf_ct_put(ct);
	if (err <= 0) {
		kfree_skb(skb2);
		return -ENOMEM;
	}

	return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int ctnetlink_dump_one_entry(struct sk_buff *skb,
				    struct netlink_callback *cb,
				    struct nf_conn *ct,
				    bool dying)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u8 l3proto = nfmsg->nfgen_family;
	int res;

	if (l3proto && nf_ct_l3num(ct) != l3proto)
		return 0;

	if (ctx->last_id) {
		if (ctnetlink_get_id(ct) != ctx->last_id)
			return 0;

		ctx->last_id = 0;
	}

	/* We can't dump extension info for the unconfirmed
	 * list because unconfirmed conntracks can have
	 * ct->ext reallocated (and thus freed).
	 *
	 * In the dying list case ct->ext can't be free'd
	 * until after we drop pcpu->lock.
	 */
	res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
				  ct, dying, 0);
	if (res < 0)
		ctx->last_id = ctnetlink_get_id(ct);

	return res;
}
#endif

static int
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;
}

static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	const struct net *net = sock_net(skb->sk);
	struct nf_conntrack_net_ecache *ecache_net;
	unsigned long last_id = ctx->last_id;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
#endif

	if (ctx->done)
		return 0;

	ctx->last_id = 0;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
	ecache_net = nf_conn_pernet_ecache(net);
	spin_lock_bh(&ecache_net->dying_lock);

	hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
		struct nf_conn *ct;
		int res;

		ct = nf_ct_tuplehash_to_ctrack(h);
		if (last_id && last_id != ctnetlink_get_id(ct))
			continue;

		res = ctnetlink_dump_one_entry(skb, cb, ct, true);
		if (res < 0) {
			spin_unlock_bh(&ecache_net->dying_lock);
			return skb->len;
		}

		last_id = 0;
	}

	spin_unlock_bh(&ecache_net->dying_lock);
#endif
	ctx->done = true;

	return skb->len;
}

static int ctnetlink_get_ct_dying(struct sk_buff *skb,
				  const struct nfnl_info *info,
				  const struct nlattr * const cda[])
{
	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_dying,
		};
		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	return -EOPNOTSUPP;
}

static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
					const struct nfnl_info *info,
					const struct nlattr * const cda[])
{
	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_unconfirmed,
		};
		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	return -EOPNOTSUPP;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
	__must_hold(RCU)
{
	const struct nf_nat_hook *nat_hook;
	int err;

	nat_hook = rcu_dereference(nf_nat_hook);
	if (!nat_hook) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat") < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
		nat_hook = rcu_dereference(nf_nat_hook);
		if (nat_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	err = nat_hook->parse_nat_setup(ct, manip, attr);
	if (err == -EAGAIN) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
#else
		err = -EOPNOTSUPP;
#endif
	}
	return err;
}
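
/* Note on the locking dance above: request_module() may sleep, so the RCU
 * read lock and the ctnetlink subsystem mutex are dropped around it and
 * reacquired afterwards. Returning -EAGAIN (or passing it through from
 * parse_nat_setup) tells the caller to restart the whole operation now
 * that the NAT module has had a chance to load.
 */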
#endif

static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
	return nf_ct_change_status_common(ct, ntohl(nla_get_be32(cda[CTA_STATUS])));
}

static int
ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int ret;

	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
		return 0;

	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
					cda[CTA_NAT_DST]);
	if (ret < 0)
		return ret;

	return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
					 cda[CTA_NAT_SRC]);
#else
	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
		return 0;
	return -EOPNOTSUPP;
#endif
}

static int ctnetlink_change_helper(struct nf_conn *ct,
				   const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	struct nlattr *helpinfo = NULL;
	int err;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
	if (err < 0)
		return err;

	/* don't change helper of sibling connections */
	if (ct->master) {
		/* If we try to change the helper to the same thing twice,
		 * treat the second attempt as a no-op instead of returning
		 * an error.
		 */
		err = -EBUSY;
		if (help) {
			rcu_read_lock();
			helper = rcu_dereference(help->helper);
			if (helper && !strcmp(helper->name, helpname))
				err = 0;
			rcu_read_unlock();
		}

		return err;
	}

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			RCU_INIT_POINTER(help->helper, NULL);
		}

		return 0;
	}

	rcu_read_lock();
	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
		rcu_read_unlock();
		return -EOPNOTSUPP;
	}

	if (help) {
		if (rcu_access_pointer(help->helper) == helper) {
			/* update private helper data if allowed. */
			if (helper->from_nlattr)
				helper->from_nlattr(helpinfo, ct);
			err = 0;
		} else
			err = -EBUSY;
	} else {
		/* we cannot set a helper for an existing conntrack */
		err = -EOPNOTSUPP;
	}

	rcu_read_unlock();
	return err;
}

static int ctnetlink_change_timeout(struct nf_conn *ct,
				    const struct nlattr * const cda[])
{
	return __nf_ct_change_timeout(ct, (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ);
}

#if defined(CONFIG_NF_CONNTRACK_MARK)
static void ctnetlink_change_mark(struct nf_conn *ct,
				  const struct nlattr * const cda[])
{
	u32 mark, newmark, mask = 0;

	if (cda[CTA_MARK_MASK])
		mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));

	mark = ntohl(nla_get_be32(cda[CTA_MARK]));
	newmark = (READ_ONCE(ct->mark) & mask) ^ mark;
	if (newmark != READ_ONCE(ct->mark))
		WRITE_ONCE(ct->mark, newmark);
}
#endif

static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
};

static int ctnetlink_change_protoinfo(struct nf_conn *ct,
				      const struct nlattr * const cda[])
{
	const struct nlattr *attr = cda[CTA_PROTOINFO];
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
	int err = 0;

	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
					  protoinfo_policy, NULL);
	if (err < 0)
		return err;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (l4proto->from_nlattr)
		err = l4proto->from_nlattr(tb, ct);

	return err;
}

static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
	[CTA_SEQADJ_CORRECTION_POS]	= { .type = NLA_U32 },
	[CTA_SEQADJ_OFFSET_BEFORE]	= { .type = NLA_U32 },
	[CTA_SEQADJ_OFFSET_AFTER]	= { .type = NLA_U32 },
};

static int change_seq_adj(struct nf_ct_seqadj *seq,
			  const struct nlattr * const attr)
{
	int err;
	struct nlattr *cda[CTA_SEQADJ_MAX+1];

	err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr,
					  seqadj_policy, NULL);
	if (err < 0)
		return err;

	if (!cda[CTA_SEQADJ_CORRECTION_POS])
		return -EINVAL;

	seq->correction_pos =
		ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));

	if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
		return -EINVAL;

	seq->offset_before =
		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));

	if (!cda[CTA_SEQADJ_OFFSET_AFTER])
		return -EINVAL;

	seq->offset_after =
		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));

	return 0;
}

static int
ctnetlink_change_seq_adj(struct nf_conn *ct,
			 const struct nlattr * const cda[])
{
	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
	int ret = 0;

	if (!seqadj)
		return 0;

	spin_lock_bh(&ct->lock);
	if (cda[CTA_SEQ_ADJ_ORIG]) {
		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
				     cda[CTA_SEQ_ADJ_ORIG]);
		if (ret < 0)
			goto err;

		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
	}

	if (cda[CTA_SEQ_ADJ_REPLY]) {
		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
				     cda[CTA_SEQ_ADJ_REPLY]);
		if (ret < 0)
			goto err;

		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
	}

	spin_unlock_bh(&ct->lock);
	return 0;
err:
	spin_unlock_bh(&ct->lock);
	return ret;
}

static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = {
	[CTA_SYNPROXY_ISN]	= { .type = NLA_U32 },
	[CTA_SYNPROXY_ITS]	= { .type = NLA_U32 },
	[CTA_SYNPROXY_TSOFF]	= { .type = NLA_U32 },
};

static int ctnetlink_change_synproxy(struct nf_conn *ct,
				     const struct nlattr * const cda[])
{
	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
	struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
	int err;

	if (!synproxy)
		return 0;

	err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX,
					  cda[CTA_SYNPROXY], synproxy_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[CTA_SYNPROXY_ISN] ||
	    !tb[CTA_SYNPROXY_ITS] ||
	    !tb[CTA_SYNPROXY_TSOFF])
		return -EINVAL;

	synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
	synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
	synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));

	return 0;
}

static int
ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
	size_t len = nla_len(cda[CTA_LABELS]);
	const void *mask = cda[CTA_LABELS_MASK];

	if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
		return -EINVAL;

	if (mask) {
		if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
		    nla_len(cda[CTA_LABELS_MASK]) != len)
			return -EINVAL;
		mask = nla_data(cda[CTA_LABELS_MASK]);
	}

	len /= sizeof(u32);

	return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
#else
	return -EOPNOTSUPP;
#endif
}

static int
ctnetlink_change_conntrack(struct nf_conn *ct,
			   const struct nlattr * const cda[])
{
	int err;

	/* only allow NAT changes and master assignation for new conntracks */
	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
		return -EOPNOTSUPP;

	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			return err;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ctnetlink_change_mark(ct, cda);
#endif

	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_seq_adj(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_SYNPROXY]) {
		err = ctnetlink_change_synproxy(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_LABELS]) {
		err = ctnetlink_attach_labels(ct, cda);
		if (err < 0)
			return err;
	}

	return 0;
}

static struct nf_conn *
ctnetlink_create_conntrack(struct net *net,
			   const struct nf_conntrack_zone *zone,
			   const struct nlattr * const cda[],
			   struct nf_conntrack_tuple *otuple,
			   struct nf_conntrack_tuple *rtuple,
			   u8 u3)
{
	struct nf_conn *ct;
	int err = -EINVAL;
	struct nf_conntrack_helper *helper;
	struct nf_conn_tstamp *tstamp;
	u64 timeout;

	ct = nf_conntrack_alloc(net, zone, otuple, rtuple,
GFP_ATOMIC); 2253 if (IS_ERR(ct)) 2254 return ERR_PTR(-ENOMEM); 2255 2256 if (!cda[CTA_TIMEOUT]) 2257 goto err1; 2258 2259 rcu_read_lock(); 2260 if (cda[CTA_HELP]) { 2261 char *helpname = NULL; 2262 struct nlattr *helpinfo = NULL; 2263 2264 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo); 2265 if (err < 0) 2266 goto err2; 2267 2268 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), 2269 nf_ct_protonum(ct)); 2270 if (helper == NULL) { 2271 rcu_read_unlock(); 2272 #ifdef CONFIG_MODULES 2273 if (request_module("nfct-helper-%s", helpname) < 0) { 2274 err = -EOPNOTSUPP; 2275 goto err1; 2276 } 2277 2278 rcu_read_lock(); 2279 helper = __nf_conntrack_helper_find(helpname, 2280 nf_ct_l3num(ct), 2281 nf_ct_protonum(ct)); 2282 if (helper) { 2283 err = -EAGAIN; 2284 goto err2; 2285 } 2286 rcu_read_unlock(); 2287 #endif 2288 err = -EOPNOTSUPP; 2289 goto err1; 2290 } else { 2291 struct nf_conn_help *help; 2292 2293 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 2294 if (help == NULL) { 2295 err = -ENOMEM; 2296 goto err2; 2297 } 2298 /* set private helper data if allowed. */ 2299 if (helper->from_nlattr) 2300 helper->from_nlattr(helpinfo, ct); 2301 2302 /* disable helper auto-assignment for this entry */ 2303 ct->status |= IPS_HELPER; 2304 RCU_INIT_POINTER(help->helper, helper); 2305 } 2306 } 2307 2308 err = ctnetlink_setup_nat(ct, cda); 2309 if (err < 0) 2310 goto err2; 2311 2312 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 2313 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); 2314 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); 2315 nf_ct_labels_ext_add(ct); 2316 nfct_seqadj_ext_add(ct); 2317 nfct_synproxy_ext_add(ct); 2318 2319 /* we must add conntrack extensions before confirmation. */ 2320 ct->status |= IPS_CONFIRMED; 2321 2322 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; 2323 __nf_ct_set_timeout(ct, timeout); 2324 2325 if (cda[CTA_STATUS]) { 2326 err = ctnetlink_change_status(ct, cda); 2327 if (err < 0) 2328 goto err2; 2329 } 2330 2331 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) { 2332 err = ctnetlink_change_seq_adj(ct, cda); 2333 if (err < 0) 2334 goto err2; 2335 } 2336 2337 memset(&ct->proto, 0, sizeof(ct->proto)); 2338 if (cda[CTA_PROTOINFO]) { 2339 err = ctnetlink_change_protoinfo(ct, cda); 2340 if (err < 0) 2341 goto err2; 2342 } 2343 2344 if (cda[CTA_SYNPROXY]) { 2345 err = ctnetlink_change_synproxy(ct, cda); 2346 if (err < 0) 2347 goto err2; 2348 } 2349 2350 #if defined(CONFIG_NF_CONNTRACK_MARK) 2351 if (cda[CTA_MARK]) 2352 ctnetlink_change_mark(ct, cda); 2353 #endif 2354 2355 /* setup master conntrack: this is a confirmed expectation */ 2356 if (cda[CTA_TUPLE_MASTER]) { 2357 struct nf_conntrack_tuple master; 2358 struct nf_conntrack_tuple_hash *master_h; 2359 struct nf_conn *master_ct; 2360 2361 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, 2362 u3, NULL); 2363 if (err < 0) 2364 goto err2; 2365 2366 master_h = nf_conntrack_find_get(net, zone, &master); 2367 if (master_h == NULL) { 2368 err = -ENOENT; 2369 goto err2; 2370 } 2371 master_ct = nf_ct_tuplehash_to_ctrack(master_h); 2372 __set_bit(IPS_EXPECTED_BIT, &ct->status); 2373 ct->master = master_ct; 2374 } 2375 tstamp = nf_conn_tstamp_find(ct); 2376 if (tstamp) 2377 tstamp->start = ktime_get_real_ns(); 2378 2379 err = nf_conntrack_hash_check_insert(ct); 2380 if (err < 0) 2381 goto err3; 2382 2383 rcu_read_unlock(); 2384 2385 return ct; 2386 2387 err3: 2388 if (ct->master) 2389 nf_ct_put(ct->master); 2390 err2: 2391 rcu_read_unlock(); 2392 err1: 2393 nf_conntrack_free(ct); 2394 return ERR_PTR(err); 
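	/* The error labels unwind in reverse order of setup: err3 drops the
	 * reference on the master conntrack taken above, err2 leaves the RCU
	 * read-side section, err1 frees the not-yet-inserted entry.
	 */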
2395 } 2396 2397 static int ctnetlink_new_conntrack(struct sk_buff *skb, 2398 const struct nfnl_info *info, 2399 const struct nlattr * const cda[]) 2400 { 2401 struct nf_conntrack_tuple otuple, rtuple; 2402 struct nf_conntrack_tuple_hash *h = NULL; 2403 u_int8_t u3 = info->nfmsg->nfgen_family; 2404 struct nf_conntrack_zone zone; 2405 struct nf_conn *ct; 2406 int err; 2407 2408 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); 2409 if (err < 0) 2410 return err; 2411 2412 if (cda[CTA_TUPLE_ORIG]) { 2413 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, 2414 u3, &zone); 2415 if (err < 0) 2416 return err; 2417 } 2418 2419 if (cda[CTA_TUPLE_REPLY]) { 2420 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, 2421 u3, &zone); 2422 if (err < 0) 2423 return err; 2424 } 2425 2426 if (cda[CTA_TUPLE_ORIG]) 2427 h = nf_conntrack_find_get(info->net, &zone, &otuple); 2428 else if (cda[CTA_TUPLE_REPLY]) 2429 h = nf_conntrack_find_get(info->net, &zone, &rtuple); 2430 2431 if (h == NULL) { 2432 err = -ENOENT; 2433 if (info->nlh->nlmsg_flags & NLM_F_CREATE) { 2434 enum ip_conntrack_events events; 2435 2436 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY]) 2437 return -EINVAL; 2438 if (otuple.dst.protonum != rtuple.dst.protonum) 2439 return -EINVAL; 2440 2441 ct = ctnetlink_create_conntrack(info->net, &zone, cda, 2442 &otuple, &rtuple, u3); 2443 if (IS_ERR(ct)) 2444 return PTR_ERR(ct); 2445 2446 err = 0; 2447 if (test_bit(IPS_EXPECTED_BIT, &ct->status)) 2448 events = 1 << IPCT_RELATED; 2449 else 2450 events = 1 << IPCT_NEW; 2451 2452 if (cda[CTA_LABELS] && 2453 ctnetlink_attach_labels(ct, cda) == 0) 2454 events |= (1 << IPCT_LABEL); 2455 2456 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 2457 (1 << IPCT_ASSURED) | 2458 (1 << IPCT_HELPER) | 2459 (1 << IPCT_PROTOINFO) | 2460 (1 << IPCT_SEQADJ) | 2461 (1 << IPCT_MARK) | 2462 (1 << IPCT_SYNPROXY) | 2463 events, 2464 ct, NETLINK_CB(skb).portid, 2465 nlmsg_report(info->nlh)); 2466 nf_ct_put(ct); 2467 } 2468 2469 return err; 2470 } 2471 /* implicit 'else' */ 2472 2473 err = -EEXIST; 2474 ct = nf_ct_tuplehash_to_ctrack(h); 2475 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) { 2476 err = ctnetlink_change_conntrack(ct, cda); 2477 if (err == 0) { 2478 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 2479 (1 << IPCT_ASSURED) | 2480 (1 << IPCT_HELPER) | 2481 (1 << IPCT_LABEL) | 2482 (1 << IPCT_PROTOINFO) | 2483 (1 << IPCT_SEQADJ) | 2484 (1 << IPCT_MARK) | 2485 (1 << IPCT_SYNPROXY), 2486 ct, NETLINK_CB(skb).portid, 2487 nlmsg_report(info->nlh)); 2488 } 2489 } 2490 2491 nf_ct_put(ct); 2492 return err; 2493 } 2494 2495 static int 2496 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq, 2497 __u16 cpu, const struct ip_conntrack_stat *st) 2498 { 2499 struct nlmsghdr *nlh; 2500 unsigned int flags = portid ? 
NLM_F_MULTI : 0, event; 2501 2502 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, 2503 IPCTNL_MSG_CT_GET_STATS_CPU); 2504 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, 2505 NFNETLINK_V0, htons(cpu)); 2506 if (!nlh) 2507 goto nlmsg_failure; 2508 2509 if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) || 2510 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) || 2511 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) || 2512 nla_put_be32(skb, CTA_STATS_INSERT_FAILED, 2513 htonl(st->insert_failed)) || 2514 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) || 2515 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) || 2516 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) || 2517 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART, 2518 htonl(st->search_restart)) || 2519 nla_put_be32(skb, CTA_STATS_CLASH_RESOLVE, 2520 htonl(st->clash_resolve)) || 2521 nla_put_be32(skb, CTA_STATS_CHAIN_TOOLONG, 2522 htonl(st->chaintoolong))) 2523 goto nla_put_failure; 2524 2525 nlmsg_end(skb, nlh); 2526 return skb->len; 2527 2528 nla_put_failure: 2529 nlmsg_failure: 2530 nlmsg_cancel(skb, nlh); 2531 return -1; 2532 } 2533 2534 static int 2535 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb) 2536 { 2537 int cpu; 2538 struct net *net = sock_net(skb->sk); 2539 2540 if (cb->args[0] == nr_cpu_ids) 2541 return 0; 2542 2543 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { 2544 const struct ip_conntrack_stat *st; 2545 2546 if (!cpu_possible(cpu)) 2547 continue; 2548 2549 st = per_cpu_ptr(net->ct.stat, cpu); 2550 if (ctnetlink_ct_stat_cpu_fill_info(skb, 2551 NETLINK_CB(cb->skb).portid, 2552 cb->nlh->nlmsg_seq, 2553 cpu, st) < 0) 2554 break; 2555 } 2556 cb->args[0] = cpu; 2557 2558 return skb->len; 2559 } 2560 2561 static int ctnetlink_stat_ct_cpu(struct sk_buff *skb, 2562 const struct nfnl_info *info, 2563 const struct nlattr * const cda[]) 2564 { 2565 if (info->nlh->nlmsg_flags & NLM_F_DUMP) { 2566 struct netlink_dump_control c = { 2567 .dump = ctnetlink_ct_stat_cpu_dump, 2568 }; 2569 return netlink_dump_start(info->sk, skb, info->nlh, &c); 2570 } 2571 2572 return 0; 2573 } 2574 2575 static int 2576 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, 2577 struct net *net) 2578 { 2579 unsigned int flags = portid ? 
NLM_F_MULTI : 0, event; 2580 unsigned int nr_conntracks; 2581 struct nlmsghdr *nlh; 2582 2583 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS); 2584 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, 2585 NFNETLINK_V0, 0); 2586 if (!nlh) 2587 goto nlmsg_failure; 2588 2589 nr_conntracks = nf_conntrack_count(net); 2590 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks))) 2591 goto nla_put_failure; 2592 2593 if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max))) 2594 goto nla_put_failure; 2595 2596 nlmsg_end(skb, nlh); 2597 return skb->len; 2598 2599 nla_put_failure: 2600 nlmsg_failure: 2601 nlmsg_cancel(skb, nlh); 2602 return -1; 2603 } 2604 2605 static int ctnetlink_stat_ct(struct sk_buff *skb, const struct nfnl_info *info, 2606 const struct nlattr * const cda[]) 2607 { 2608 struct sk_buff *skb2; 2609 int err; 2610 2611 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2612 if (skb2 == NULL) 2613 return -ENOMEM; 2614 2615 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid, 2616 info->nlh->nlmsg_seq, 2617 NFNL_MSG_TYPE(info->nlh->nlmsg_type), 2618 sock_net(skb->sk)); 2619 if (err <= 0) { 2620 kfree_skb(skb2); 2621 return -ENOMEM; 2622 } 2623 2624 return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); 2625 } 2626 2627 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { 2628 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED }, 2629 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED }, 2630 [CTA_EXPECT_MASK] = { .type = NLA_NESTED }, 2631 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, 2632 [CTA_EXPECT_ID] = { .type = NLA_U32 }, 2633 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING, 2634 .len = NF_CT_HELPER_NAME_LEN - 1 }, 2635 [CTA_EXPECT_ZONE] = { .type = NLA_U16 }, 2636 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 }, 2637 [CTA_EXPECT_CLASS] = { .type = NLA_U32 }, 2638 [CTA_EXPECT_NAT] = { .type = NLA_NESTED }, 2639 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING }, 2640 }; 2641 2642 static struct nf_conntrack_expect * 2643 ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct, 2644 struct nf_conntrack_helper *helper, 2645 struct nf_conntrack_tuple *tuple, 2646 struct nf_conntrack_tuple *mask); 2647 2648 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 2649 static size_t 2650 ctnetlink_glue_build_size(const struct nf_conn *ct) 2651 { 2652 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */ 2653 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */ 2654 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */ 2655 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */ 2656 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */ 2657 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */ 2658 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */ 2659 + nla_total_size(0) /* CTA_PROTOINFO */ 2660 + nla_total_size(0) /* CTA_HELP */ 2661 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ 2662 + ctnetlink_secctx_size(ct) 2663 + ctnetlink_acct_size(ct) 2664 + ctnetlink_timestamp_size(ct) 2665 #if IS_ENABLED(CONFIG_NF_NAT) 2666 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ 2667 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */ 2668 #endif 2669 #ifdef CONFIG_NF_CONNTRACK_MARK 2670 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 2671 #endif 2672 #ifdef CONFIG_NF_CONNTRACK_ZONES 2673 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */ 2674 #endif 2675 + ctnetlink_proto_size(ct) 2676 ; 2677 } 2678 2679 static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) 
2680 { 2681 const struct nf_conntrack_zone *zone; 2682 struct nlattr *nest_parms; 2683 2684 zone = nf_ct_zone(ct); 2685 2686 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); 2687 if (!nest_parms) 2688 goto nla_put_failure; 2689 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) 2690 goto nla_put_failure; 2691 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, 2692 NF_CT_ZONE_DIR_ORIG) < 0) 2693 goto nla_put_failure; 2694 nla_nest_end(skb, nest_parms); 2695 2696 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY); 2697 if (!nest_parms) 2698 goto nla_put_failure; 2699 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) 2700 goto nla_put_failure; 2701 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, 2702 NF_CT_ZONE_DIR_REPL) < 0) 2703 goto nla_put_failure; 2704 nla_nest_end(skb, nest_parms); 2705 2706 if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone, 2707 NF_CT_DEFAULT_ZONE_DIR) < 0) 2708 goto nla_put_failure; 2709 2710 if (ctnetlink_dump_id(skb, ct) < 0) 2711 goto nla_put_failure; 2712 2713 if (ctnetlink_dump_status(skb, ct) < 0) 2714 goto nla_put_failure; 2715 2716 if (ctnetlink_dump_timeout(skb, ct, false) < 0) 2717 goto nla_put_failure; 2718 2719 if (ctnetlink_dump_protoinfo(skb, ct, false) < 0) 2720 goto nla_put_failure; 2721 2722 if (ctnetlink_dump_acct(skb, ct, IPCTNL_MSG_CT_GET) < 0 || 2723 ctnetlink_dump_timestamp(skb, ct) < 0) 2724 goto nla_put_failure; 2725 2726 if (ctnetlink_dump_helpinfo(skb, ct) < 0) 2727 goto nla_put_failure; 2728 2729 #ifdef CONFIG_NF_CONNTRACK_SECMARK 2730 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0) 2731 goto nla_put_failure; 2732 #endif 2733 if (ct->master && ctnetlink_dump_master(skb, ct) < 0) 2734 goto nla_put_failure; 2735 2736 if ((ct->status & IPS_SEQ_ADJUST) && 2737 ctnetlink_dump_ct_seq_adj(skb, ct) < 0) 2738 goto nla_put_failure; 2739 2740 if (ctnetlink_dump_ct_synproxy(skb, ct) < 0) 2741 goto nla_put_failure; 2742 2743 #ifdef CONFIG_NF_CONNTRACK_MARK 2744 if (ctnetlink_dump_mark(skb, ct, true) < 0) 2745 goto nla_put_failure; 2746 #endif 2747 if (ctnetlink_dump_labels(skb, ct) < 0) 2748 goto nla_put_failure; 2749 return 0; 2750 2751 nla_put_failure: 2752 return -ENOSPC; 2753 } 2754 2755 static int 2756 ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct, 2757 enum ip_conntrack_info ctinfo, 2758 u_int16_t ct_attr, u_int16_t ct_info_attr) 2759 { 2760 struct nlattr *nest_parms; 2761 2762 nest_parms = nla_nest_start(skb, ct_attr); 2763 if (!nest_parms) 2764 goto nla_put_failure; 2765 2766 if (__ctnetlink_glue_build(skb, ct) < 0) 2767 goto nla_put_failure; 2768 2769 nla_nest_end(skb, nest_parms); 2770 2771 if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo))) 2772 goto nla_put_failure; 2773 2774 return 0; 2775 2776 nla_put_failure: 2777 return -ENOSPC; 2778 } 2779 2780 static int 2781 ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[]) 2782 { 2783 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS])); 2784 unsigned long d = ct->status ^ status; 2785 2786 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) 2787 /* SEEN_REPLY bit can only be set */ 2788 return -EBUSY; 2789 2790 if (d & IPS_ASSURED && !(status & IPS_ASSURED)) 2791 /* ASSURED bit can only be set */ 2792 return -EBUSY; 2793 2794 /* This check is less strict than ctnetlink_change_status() 2795 * because callers often flip IPS_EXPECTED bits when sending 2796 * an NFQA_CT attribute to the kernel. So ignore the 2797 * unchangeable bits but do not error out. 
Also user programs 2798 * are allowed to clear the bits that they are allowed to change. 2799 */ 2800 __nf_ct_change_status(ct, status, ~status); 2801 return 0; 2802 } 2803 2804 static int 2805 ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct) 2806 { 2807 int err; 2808 2809 if (cda[CTA_TIMEOUT]) { 2810 err = ctnetlink_change_timeout(ct, cda); 2811 if (err < 0) 2812 return err; 2813 } 2814 if (cda[CTA_STATUS]) { 2815 err = ctnetlink_update_status(ct, cda); 2816 if (err < 0) 2817 return err; 2818 } 2819 if (cda[CTA_HELP]) { 2820 err = ctnetlink_change_helper(ct, cda); 2821 if (err < 0) 2822 return err; 2823 } 2824 if (cda[CTA_LABELS]) { 2825 err = ctnetlink_attach_labels(ct, cda); 2826 if (err < 0) 2827 return err; 2828 } 2829 #if defined(CONFIG_NF_CONNTRACK_MARK) 2830 if (cda[CTA_MARK]) { 2831 ctnetlink_change_mark(ct, cda); 2832 } 2833 #endif 2834 return 0; 2835 } 2836 2837 static int 2838 ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct) 2839 { 2840 struct nlattr *cda[CTA_MAX+1]; 2841 int ret; 2842 2843 ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy, 2844 NULL); 2845 if (ret < 0) 2846 return ret; 2847 2848 return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct); 2849 } 2850 2851 static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda, 2852 const struct nf_conn *ct, 2853 struct nf_conntrack_tuple *tuple, 2854 struct nf_conntrack_tuple *mask) 2855 { 2856 int err; 2857 2858 err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE, 2859 nf_ct_l3num(ct), NULL); 2860 if (err < 0) 2861 return err; 2862 2863 return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK, 2864 nf_ct_l3num(ct), NULL); 2865 } 2866 2867 static int 2868 ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, 2869 u32 portid, u32 report) 2870 { 2871 struct nlattr *cda[CTA_EXPECT_MAX+1]; 2872 struct nf_conntrack_tuple tuple, mask; 2873 struct nf_conntrack_helper *helper = NULL; 2874 struct nf_conntrack_expect *exp; 2875 int err; 2876 2877 err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr, 2878 exp_nla_policy, NULL); 2879 if (err < 0) 2880 return err; 2881 2882 err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda, 2883 ct, &tuple, &mask); 2884 if (err < 0) 2885 return err; 2886 2887 if (cda[CTA_EXPECT_HELP_NAME]) { 2888 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); 2889 2890 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), 2891 nf_ct_protonum(ct)); 2892 if (helper == NULL) 2893 return -EOPNOTSUPP; 2894 } 2895 2896 exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct, 2897 helper, &tuple, &mask); 2898 if (IS_ERR(exp)) 2899 return PTR_ERR(exp); 2900 2901 err = nf_ct_expect_related_report(exp, portid, report, 0); 2902 nf_ct_expect_put(exp); 2903 return err; 2904 } 2905 2906 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct, 2907 enum ip_conntrack_info ctinfo, int diff) 2908 { 2909 if (!(ct->status & IPS_NAT_MASK)) 2910 return; 2911 2912 nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff); 2913 } 2914 2915 static const struct nfnl_ct_hook ctnetlink_glue_hook = { 2916 .build_size = ctnetlink_glue_build_size, 2917 .build = ctnetlink_glue_build, 2918 .parse = ctnetlink_glue_parse, 2919 .attach_expect = ctnetlink_glue_attach_expect, 2920 .seq_adjust = ctnetlink_glue_seqadj, 2921 }; 2922 #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */ 2923 2924 /*********************************************************************** 2925 * EXPECT 2926 
***********************************************************************/ 2927 2928 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb, 2929 const struct nf_conntrack_tuple *tuple, 2930 u32 type) 2931 { 2932 struct nlattr *nest_parms; 2933 2934 nest_parms = nla_nest_start(skb, type); 2935 if (!nest_parms) 2936 goto nla_put_failure; 2937 if (ctnetlink_dump_tuples(skb, tuple) < 0) 2938 goto nla_put_failure; 2939 nla_nest_end(skb, nest_parms); 2940 2941 return 0; 2942 2943 nla_put_failure: 2944 return -1; 2945 } 2946 2947 static int ctnetlink_exp_dump_mask(struct sk_buff *skb, 2948 const struct nf_conntrack_tuple *tuple, 2949 const struct nf_conntrack_tuple_mask *mask) 2950 { 2951 const struct nf_conntrack_l4proto *l4proto; 2952 struct nf_conntrack_tuple m; 2953 struct nlattr *nest_parms; 2954 int ret; 2955 2956 memset(&m, 0xFF, sizeof(m)); 2957 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3)); 2958 m.src.u.all = mask->src.u.all; 2959 m.src.l3num = tuple->src.l3num; 2960 m.dst.protonum = tuple->dst.protonum; 2961 2962 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK); 2963 if (!nest_parms) 2964 goto nla_put_failure; 2965 2966 rcu_read_lock(); 2967 ret = ctnetlink_dump_tuples_ip(skb, &m); 2968 if (ret >= 0) { 2969 l4proto = nf_ct_l4proto_find(tuple->dst.protonum); 2970 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); 2971 } 2972 rcu_read_unlock(); 2973 2974 if (unlikely(ret < 0)) 2975 goto nla_put_failure; 2976 2977 nla_nest_end(skb, nest_parms); 2978 2979 return 0; 2980 2981 nla_put_failure: 2982 return -1; 2983 } 2984 2985 #if IS_ENABLED(CONFIG_NF_NAT) 2986 static const union nf_inet_addr any_addr; 2987 #endif 2988 2989 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp) 2990 { 2991 static siphash_aligned_key_t exp_id_seed; 2992 unsigned long a, b, c, d; 2993 2994 net_get_random_once(&exp_id_seed, sizeof(exp_id_seed)); 2995 2996 a = (unsigned long)exp; 2997 b = (unsigned long)exp->helper; 2998 c = (unsigned long)exp->master; 2999 d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed); 3000 3001 #ifdef CONFIG_64BIT 3002 return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed); 3003 #else 3004 return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed); 3005 #endif 3006 } 3007 3008 static int 3009 ctnetlink_exp_dump_expect(struct sk_buff *skb, 3010 const struct nf_conntrack_expect *exp) 3011 { 3012 struct nf_conn *master = exp->master; 3013 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ; 3014 struct nf_conn_help *help; 3015 #if IS_ENABLED(CONFIG_NF_NAT) 3016 struct nlattr *nest_parms; 3017 struct nf_conntrack_tuple nat_tuple = {}; 3018 #endif 3019 struct nf_ct_helper_expectfn *expfn; 3020 3021 if (timeout < 0) 3022 timeout = 0; 3023 3024 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0) 3025 goto nla_put_failure; 3026 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0) 3027 goto nla_put_failure; 3028 if (ctnetlink_exp_dump_tuple(skb, 3029 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 3030 CTA_EXPECT_MASTER) < 0) 3031 goto nla_put_failure; 3032 3033 #if IS_ENABLED(CONFIG_NF_NAT) 3034 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) || 3035 exp->saved_proto.all) { 3036 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT); 3037 if (!nest_parms) 3038 goto nla_put_failure; 3039 3040 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir))) 3041 goto nla_put_failure; 3042 3043 nat_tuple.src.l3num = nf_ct_l3num(master); 3044 nat_tuple.src.u3 = exp->saved_addr; 
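		/* Rebuild the saved NAT manipulation as a tuple of its own so
		 * it can be dumped as CTA_EXPECT_NAT_TUPLE inside the
		 * CTA_EXPECT_NAT nest opened above.
		 */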
3045 nat_tuple.dst.protonum = nf_ct_protonum(master); 3046 nat_tuple.src.u = exp->saved_proto; 3047 3048 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple, 3049 CTA_EXPECT_NAT_TUPLE) < 0) 3050 goto nla_put_failure; 3051 nla_nest_end(skb, nest_parms); 3052 } 3053 #endif 3054 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) || 3055 nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) || 3056 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) || 3057 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class))) 3058 goto nla_put_failure; 3059 help = nfct_help(master); 3060 if (help) { 3061 struct nf_conntrack_helper *helper; 3062 3063 helper = rcu_dereference(help->helper); 3064 if (helper && 3065 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name)) 3066 goto nla_put_failure; 3067 } 3068 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn); 3069 if (expfn != NULL && 3070 nla_put_string(skb, CTA_EXPECT_FN, expfn->name)) 3071 goto nla_put_failure; 3072 3073 return 0; 3074 3075 nla_put_failure: 3076 return -1; 3077 } 3078 3079 static int 3080 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq, 3081 int event, const struct nf_conntrack_expect *exp) 3082 { 3083 struct nlmsghdr *nlh; 3084 unsigned int flags = portid ? NLM_F_MULTI : 0; 3085 3086 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event); 3087 nlh = nfnl_msg_put(skb, portid, seq, event, flags, 3088 exp->tuple.src.l3num, NFNETLINK_V0, 0); 3089 if (!nlh) 3090 goto nlmsg_failure; 3091 3092 if (ctnetlink_exp_dump_expect(skb, exp) < 0) 3093 goto nla_put_failure; 3094 3095 nlmsg_end(skb, nlh); 3096 return skb->len; 3097 3098 nlmsg_failure: 3099 nla_put_failure: 3100 nlmsg_cancel(skb, nlh); 3101 return -1; 3102 } 3103 3104 #ifdef CONFIG_NF_CONNTRACK_EVENTS 3105 static int 3106 ctnetlink_expect_event(unsigned int events, const struct nf_exp_event *item) 3107 { 3108 struct nf_conntrack_expect *exp = item->exp; 3109 struct net *net = nf_ct_exp_net(exp); 3110 struct nlmsghdr *nlh; 3111 struct sk_buff *skb; 3112 unsigned int type, group; 3113 int flags = 0; 3114 3115 if (events & (1 << IPEXP_DESTROY)) { 3116 type = IPCTNL_MSG_EXP_DELETE; 3117 group = NFNLGRP_CONNTRACK_EXP_DESTROY; 3118 } else if (events & (1 << IPEXP_NEW)) { 3119 type = IPCTNL_MSG_EXP_NEW; 3120 flags = NLM_F_CREATE|NLM_F_EXCL; 3121 group = NFNLGRP_CONNTRACK_EXP_NEW; 3122 } else 3123 return 0; 3124 3125 if (!item->report && !nfnetlink_has_listeners(net, group)) 3126 return 0; 3127 3128 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 3129 if (skb == NULL) 3130 goto errout; 3131 3132 type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type); 3133 nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, 3134 exp->tuple.src.l3num, NFNETLINK_V0, 0); 3135 if (!nlh) 3136 goto nlmsg_failure; 3137 3138 if (ctnetlink_exp_dump_expect(skb, exp) < 0) 3139 goto nla_put_failure; 3140 3141 nlmsg_end(skb, nlh); 3142 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC); 3143 return 0; 3144 3145 nla_put_failure: 3146 nlmsg_cancel(skb, nlh); 3147 nlmsg_failure: 3148 kfree_skb(skb); 3149 errout: 3150 nfnetlink_set_err(net, 0, 0, -ENOBUFS); 3151 return 0; 3152 } 3153 #endif 3154 3155 static unsigned long ctnetlink_exp_id(const struct nf_conntrack_expect *exp) 3156 { 3157 unsigned long id = (unsigned long)exp; 3158 3159 id += nf_ct_get_id(exp->master); 3160 id += exp->class; 3161 3162 return id ? 
id : 1; 3163 } 3164 3165 static int 3166 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 3167 { 3168 struct net *net = sock_net(skb->sk); 3169 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3170 u_int8_t l3proto = nfmsg->nfgen_family; 3171 unsigned long last_id = cb->args[1]; 3172 struct nf_conntrack_expect *exp; 3173 3174 rcu_read_lock(); 3175 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { 3176 restart: 3177 hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]], 3178 hnode) { 3179 if (l3proto && exp->tuple.src.l3num != l3proto) 3180 continue; 3181 3182 if (!net_eq(nf_ct_net(exp->master), net)) 3183 continue; 3184 3185 if (cb->args[1]) { 3186 if (ctnetlink_exp_id(exp) != last_id) 3187 continue; 3188 cb->args[1] = 0; 3189 } 3190 if (ctnetlink_exp_fill_info(skb, 3191 NETLINK_CB(cb->skb).portid, 3192 cb->nlh->nlmsg_seq, 3193 IPCTNL_MSG_EXP_NEW, 3194 exp) < 0) { 3195 cb->args[1] = ctnetlink_exp_id(exp); 3196 goto out; 3197 } 3198 } 3199 if (cb->args[1]) { 3200 cb->args[1] = 0; 3201 goto restart; 3202 } 3203 } 3204 out: 3205 rcu_read_unlock(); 3206 return skb->len; 3207 } 3208 3209 static int 3210 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 3211 { 3212 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3213 struct nf_conn *ct = cb->data; 3214 struct nf_conn_help *help = nfct_help(ct); 3215 u_int8_t l3proto = nfmsg->nfgen_family; 3216 unsigned long last_id = cb->args[1]; 3217 struct nf_conntrack_expect *exp; 3218 3219 if (cb->args[0]) 3220 return 0; 3221 3222 rcu_read_lock(); 3223 3224 restart: 3225 hlist_for_each_entry_rcu(exp, &help->expectations, lnode) { 3226 if (l3proto && exp->tuple.src.l3num != l3proto) 3227 continue; 3228 if (cb->args[1]) { 3229 if (ctnetlink_exp_id(exp) != last_id) 3230 continue; 3231 cb->args[1] = 0; 3232 } 3233 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid, 3234 cb->nlh->nlmsg_seq, 3235 IPCTNL_MSG_EXP_NEW, 3236 exp) < 0) { 3237 cb->args[1] = ctnetlink_exp_id(exp); 3238 goto out; 3239 } 3240 } 3241 if (cb->args[1]) { 3242 cb->args[1] = 0; 3243 goto restart; 3244 } 3245 cb->args[0] = 1; 3246 out: 3247 rcu_read_unlock(); 3248 return skb->len; 3249 } 3250 3251 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl, 3252 struct sk_buff *skb, 3253 const struct nlmsghdr *nlh, 3254 const struct nlattr * const cda[], 3255 struct netlink_ext_ack *extack) 3256 { 3257 int err; 3258 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 3259 u_int8_t u3 = nfmsg->nfgen_family; 3260 struct nf_conntrack_tuple tuple; 3261 struct nf_conntrack_tuple_hash *h; 3262 struct nf_conn *ct; 3263 struct nf_conntrack_zone zone; 3264 struct netlink_dump_control c = { 3265 .dump = ctnetlink_exp_ct_dump_table, 3266 }; 3267 3268 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, 3269 u3, NULL); 3270 if (err < 0) 3271 return err; 3272 3273 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3274 if (err < 0) 3275 return err; 3276 3277 h = nf_conntrack_find_get(net, &zone, &tuple); 3278 if (!h) 3279 return -ENOENT; 3280 3281 ct = nf_ct_tuplehash_to_ctrack(h); 3282 /* No expectation linked to this connection tracking. 
*/ 3283 if (!nfct_help(ct)) { 3284 nf_ct_put(ct); 3285 return 0; 3286 } 3287 3288 c.data = ct; 3289 3290 err = netlink_dump_start(ctnl, skb, nlh, &c); 3291 nf_ct_put(ct); 3292 3293 return err; 3294 } 3295 3296 static int ctnetlink_get_expect(struct sk_buff *skb, 3297 const struct nfnl_info *info, 3298 const struct nlattr * const cda[]) 3299 { 3300 u_int8_t u3 = info->nfmsg->nfgen_family; 3301 struct nf_conntrack_tuple tuple; 3302 struct nf_conntrack_expect *exp; 3303 struct nf_conntrack_zone zone; 3304 struct sk_buff *skb2; 3305 int err; 3306 3307 if (info->nlh->nlmsg_flags & NLM_F_DUMP) { 3308 if (cda[CTA_EXPECT_MASTER]) 3309 return ctnetlink_dump_exp_ct(info->net, info->sk, skb, 3310 info->nlh, cda, 3311 info->extack); 3312 else { 3313 struct netlink_dump_control c = { 3314 .dump = ctnetlink_exp_dump_table, 3315 }; 3316 return netlink_dump_start(info->sk, skb, info->nlh, &c); 3317 } 3318 } 3319 3320 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3321 if (err < 0) 3322 return err; 3323 3324 if (cda[CTA_EXPECT_TUPLE]) 3325 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3326 u3, NULL); 3327 else if (cda[CTA_EXPECT_MASTER]) 3328 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, 3329 u3, NULL); 3330 else 3331 return -EINVAL; 3332 3333 if (err < 0) 3334 return err; 3335 3336 exp = nf_ct_expect_find_get(info->net, &zone, &tuple); 3337 if (!exp) 3338 return -ENOENT; 3339 3340 if (cda[CTA_EXPECT_ID]) { 3341 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); 3342 3343 if (id != nf_expect_get_id(exp)) { 3344 nf_ct_expect_put(exp); 3345 return -ENOENT; 3346 } 3347 } 3348 3349 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 3350 if (!skb2) { 3351 nf_ct_expect_put(exp); 3352 return -ENOMEM; 3353 } 3354 3355 rcu_read_lock(); 3356 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid, 3357 info->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, 3358 exp); 3359 rcu_read_unlock(); 3360 nf_ct_expect_put(exp); 3361 if (err <= 0) { 3362 kfree_skb(skb2); 3363 return -ENOMEM; 3364 } 3365 3366 return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); 3367 } 3368 3369 static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data) 3370 { 3371 struct nf_conntrack_helper *helper; 3372 const struct nf_conn_help *m_help; 3373 const char *name = data; 3374 3375 m_help = nfct_help(exp->master); 3376 3377 helper = rcu_dereference(m_help->helper); 3378 if (!helper) 3379 return false; 3380 3381 return strcmp(helper->name, name) == 0; 3382 } 3383 3384 static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data) 3385 { 3386 return true; 3387 } 3388 3389 static int ctnetlink_del_expect(struct sk_buff *skb, 3390 const struct nfnl_info *info, 3391 const struct nlattr * const cda[]) 3392 { 3393 u_int8_t u3 = info->nfmsg->nfgen_family; 3394 struct nf_conntrack_expect *exp; 3395 struct nf_conntrack_tuple tuple; 3396 struct nf_conntrack_zone zone; 3397 int err; 3398 3399 if (cda[CTA_EXPECT_TUPLE]) { 3400 /* delete a single expect by tuple */ 3401 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3402 if (err < 0) 3403 return err; 3404 3405 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3406 u3, NULL); 3407 if (err < 0) 3408 return err; 3409 3410 /* bump usage count to 2 */ 3411 exp = nf_ct_expect_find_get(info->net, &zone, &tuple); 3412 if (!exp) 3413 return -ENOENT; 3414 3415 if (cda[CTA_EXPECT_ID]) { 3416 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); 3417 3418 if (id != nf_expect_get_id(exp)) { 3419 nf_ct_expect_put(exp); 3420 return -ENOENT; 3421 } 3422 } 
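		/* Tuple (and, if supplied, expectation ID) matched: unlink the
		 * expectation below and report the deletion to any listeners.
		 */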
3423 3424 /* after list removal, usage count == 1 */ 3425 spin_lock_bh(&nf_conntrack_expect_lock); 3426 if (timer_delete(&exp->timeout)) { 3427 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid, 3428 nlmsg_report(info->nlh)); 3429 nf_ct_expect_put(exp); 3430 } 3431 spin_unlock_bh(&nf_conntrack_expect_lock); 3432 /* have to put what we 'get' above. 3433 * after this line usage count == 0 */ 3434 nf_ct_expect_put(exp); 3435 } else if (cda[CTA_EXPECT_HELP_NAME]) { 3436 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]); 3437 3438 nf_ct_expect_iterate_net(info->net, expect_iter_name, name, 3439 NETLINK_CB(skb).portid, 3440 nlmsg_report(info->nlh)); 3441 } else { 3442 /* This basically means we have to flush everything*/ 3443 nf_ct_expect_iterate_net(info->net, expect_iter_all, NULL, 3444 NETLINK_CB(skb).portid, 3445 nlmsg_report(info->nlh)); 3446 } 3447 3448 return 0; 3449 } 3450 static int 3451 ctnetlink_change_expect(struct nf_conntrack_expect *x, 3452 const struct nlattr * const cda[]) 3453 { 3454 if (cda[CTA_EXPECT_TIMEOUT]) { 3455 if (!timer_delete(&x->timeout)) 3456 return -ETIME; 3457 3458 x->timeout.expires = jiffies + 3459 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; 3460 add_timer(&x->timeout); 3461 } 3462 return 0; 3463 } 3464 3465 #if IS_ENABLED(CONFIG_NF_NAT) 3466 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = { 3467 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 }, 3468 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED }, 3469 }; 3470 #endif 3471 3472 static int 3473 ctnetlink_parse_expect_nat(const struct nlattr *attr, 3474 struct nf_conntrack_expect *exp, 3475 u_int8_t u3) 3476 { 3477 #if IS_ENABLED(CONFIG_NF_NAT) 3478 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1]; 3479 struct nf_conntrack_tuple nat_tuple = {}; 3480 int err; 3481 3482 err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr, 3483 exp_nat_nla_policy, NULL); 3484 if (err < 0) 3485 return err; 3486 3487 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE]) 3488 return -EINVAL; 3489 3490 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb, 3491 &nat_tuple, CTA_EXPECT_NAT_TUPLE, 3492 u3, NULL); 3493 if (err < 0) 3494 return err; 3495 3496 exp->saved_addr = nat_tuple.src.u3; 3497 exp->saved_proto = nat_tuple.src.u; 3498 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR])); 3499 3500 return 0; 3501 #else 3502 return -EOPNOTSUPP; 3503 #endif 3504 } 3505 3506 static struct nf_conntrack_expect * 3507 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct, 3508 struct nf_conntrack_helper *helper, 3509 struct nf_conntrack_tuple *tuple, 3510 struct nf_conntrack_tuple *mask) 3511 { 3512 u_int32_t class = 0; 3513 struct nf_conntrack_expect *exp; 3514 struct nf_conn_help *help; 3515 int err; 3516 3517 help = nfct_help(ct); 3518 if (!help) 3519 return ERR_PTR(-EOPNOTSUPP); 3520 3521 if (cda[CTA_EXPECT_CLASS] && helper) { 3522 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS])); 3523 if (class > helper->expect_class_max) 3524 return ERR_PTR(-EINVAL); 3525 } 3526 exp = nf_ct_expect_alloc(ct); 3527 if (!exp) 3528 return ERR_PTR(-ENOMEM); 3529 3530 if (cda[CTA_EXPECT_FLAGS]) { 3531 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS])); 3532 exp->flags &= ~NF_CT_EXPECT_USERSPACE; 3533 } else { 3534 exp->flags = 0; 3535 } 3536 if (cda[CTA_EXPECT_FN]) { 3537 const char *name = nla_data(cda[CTA_EXPECT_FN]); 3538 struct nf_ct_helper_expectfn *expfn; 3539 3540 expfn = nf_ct_helper_expectfn_find_by_name(name); 3541 if (expfn == NULL) { 3542 err = -EINVAL; 3543 goto err_out; 
3544 } 3545 exp->expectfn = expfn->expectfn; 3546 } else 3547 exp->expectfn = NULL; 3548 3549 exp->class = class; 3550 exp->master = ct; 3551 exp->helper = helper; 3552 exp->tuple = *tuple; 3553 exp->mask.src.u3 = mask->src.u3; 3554 exp->mask.src.u.all = mask->src.u.all; 3555 3556 if (cda[CTA_EXPECT_NAT]) { 3557 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT], 3558 exp, nf_ct_l3num(ct)); 3559 if (err < 0) 3560 goto err_out; 3561 } 3562 return exp; 3563 err_out: 3564 nf_ct_expect_put(exp); 3565 return ERR_PTR(err); 3566 } 3567 3568 static int 3569 ctnetlink_create_expect(struct net *net, 3570 const struct nf_conntrack_zone *zone, 3571 const struct nlattr * const cda[], 3572 u_int8_t u3, u32 portid, int report) 3573 { 3574 struct nf_conntrack_tuple tuple, mask, master_tuple; 3575 struct nf_conntrack_tuple_hash *h = NULL; 3576 struct nf_conntrack_helper *helper = NULL; 3577 struct nf_conntrack_expect *exp; 3578 struct nf_conn *ct; 3579 int err; 3580 3581 /* caller guarantees that those three CTA_EXPECT_* exist */ 3582 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3583 u3, NULL); 3584 if (err < 0) 3585 return err; 3586 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, 3587 u3, NULL); 3588 if (err < 0) 3589 return err; 3590 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, 3591 u3, NULL); 3592 if (err < 0) 3593 return err; 3594 3595 /* Look for master conntrack of this expectation */ 3596 h = nf_conntrack_find_get(net, zone, &master_tuple); 3597 if (!h) 3598 return -ENOENT; 3599 ct = nf_ct_tuplehash_to_ctrack(h); 3600 3601 rcu_read_lock(); 3602 if (cda[CTA_EXPECT_HELP_NAME]) { 3603 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); 3604 3605 helper = __nf_conntrack_helper_find(helpname, u3, 3606 nf_ct_protonum(ct)); 3607 if (helper == NULL) { 3608 rcu_read_unlock(); 3609 #ifdef CONFIG_MODULES 3610 if (request_module("nfct-helper-%s", helpname) < 0) { 3611 err = -EOPNOTSUPP; 3612 goto err_ct; 3613 } 3614 rcu_read_lock(); 3615 helper = __nf_conntrack_helper_find(helpname, u3, 3616 nf_ct_protonum(ct)); 3617 if (helper) { 3618 err = -EAGAIN; 3619 goto err_rcu; 3620 } 3621 rcu_read_unlock(); 3622 #endif 3623 err = -EOPNOTSUPP; 3624 goto err_ct; 3625 } 3626 } 3627 3628 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); 3629 if (IS_ERR(exp)) { 3630 err = PTR_ERR(exp); 3631 goto err_rcu; 3632 } 3633 3634 err = nf_ct_expect_related_report(exp, portid, report, 0); 3635 nf_ct_expect_put(exp); 3636 err_rcu: 3637 rcu_read_unlock(); 3638 err_ct: 3639 nf_ct_put(ct); 3640 return err; 3641 } 3642 3643 static int ctnetlink_new_expect(struct sk_buff *skb, 3644 const struct nfnl_info *info, 3645 const struct nlattr * const cda[]) 3646 { 3647 u_int8_t u3 = info->nfmsg->nfgen_family; 3648 struct nf_conntrack_tuple tuple; 3649 struct nf_conntrack_expect *exp; 3650 struct nf_conntrack_zone zone; 3651 int err; 3652 3653 if (!cda[CTA_EXPECT_TUPLE] 3654 || !cda[CTA_EXPECT_MASK] 3655 || !cda[CTA_EXPECT_MASTER]) 3656 return -EINVAL; 3657 3658 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3659 if (err < 0) 3660 return err; 3661 3662 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3663 u3, NULL); 3664 if (err < 0) 3665 return err; 3666 3667 spin_lock_bh(&nf_conntrack_expect_lock); 3668 exp = __nf_ct_expect_find(info->net, &zone, &tuple); 3669 if (!exp) { 3670 spin_unlock_bh(&nf_conntrack_expect_lock); 3671 err = -ENOENT; 3672 if (info->nlh->nlmsg_flags & NLM_F_CREATE) { 3673 err = ctnetlink_create_expect(info->net, &zone, cda, u3, 3674 
NETLINK_CB(skb).portid, 3675 nlmsg_report(info->nlh)); 3676 } 3677 return err; 3678 } 3679 3680 err = -EEXIST; 3681 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) 3682 err = ctnetlink_change_expect(exp, cda); 3683 spin_unlock_bh(&nf_conntrack_expect_lock); 3684 3685 return err; 3686 } 3687 3688 static int 3689 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu, 3690 const struct ip_conntrack_stat *st) 3691 { 3692 struct nlmsghdr *nlh; 3693 unsigned int flags = portid ? NLM_F_MULTI : 0, event; 3694 3695 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, 3696 IPCTNL_MSG_EXP_GET_STATS_CPU); 3697 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, 3698 NFNETLINK_V0, htons(cpu)); 3699 if (!nlh) 3700 goto nlmsg_failure; 3701 3702 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) || 3703 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) || 3704 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete))) 3705 goto nla_put_failure; 3706 3707 nlmsg_end(skb, nlh); 3708 return skb->len; 3709 3710 nla_put_failure: 3711 nlmsg_failure: 3712 nlmsg_cancel(skb, nlh); 3713 return -1; 3714 } 3715 3716 static int 3717 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb) 3718 { 3719 int cpu; 3720 struct net *net = sock_net(skb->sk); 3721 3722 if (cb->args[0] == nr_cpu_ids) 3723 return 0; 3724 3725 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { 3726 const struct ip_conntrack_stat *st; 3727 3728 if (!cpu_possible(cpu)) 3729 continue; 3730 3731 st = per_cpu_ptr(net->ct.stat, cpu); 3732 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid, 3733 cb->nlh->nlmsg_seq, 3734 cpu, st) < 0) 3735 break; 3736 } 3737 cb->args[0] = cpu; 3738 3739 return skb->len; 3740 } 3741 3742 static int ctnetlink_stat_exp_cpu(struct sk_buff *skb, 3743 const struct nfnl_info *info, 3744 const struct nlattr * const cda[]) 3745 { 3746 if (info->nlh->nlmsg_flags & NLM_F_DUMP) { 3747 struct netlink_dump_control c = { 3748 .dump = ctnetlink_exp_stat_cpu_dump, 3749 }; 3750 return netlink_dump_start(info->sk, skb, info->nlh, &c); 3751 } 3752 3753 return 0; 3754 } 3755 3756 #ifdef CONFIG_NF_CONNTRACK_EVENTS 3757 static struct nf_ct_event_notifier ctnl_notifier = { 3758 .ct_event = ctnetlink_conntrack_event, 3759 .exp_event = ctnetlink_expect_event, 3760 }; 3761 #endif 3762 3763 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { 3764 [IPCTNL_MSG_CT_NEW] = { 3765 .call = ctnetlink_new_conntrack, 3766 .type = NFNL_CB_MUTEX, 3767 .attr_count = CTA_MAX, 3768 .policy = ct_nla_policy 3769 }, 3770 [IPCTNL_MSG_CT_GET] = { 3771 .call = ctnetlink_get_conntrack, 3772 .type = NFNL_CB_MUTEX, 3773 .attr_count = CTA_MAX, 3774 .policy = ct_nla_policy 3775 }, 3776 [IPCTNL_MSG_CT_DELETE] = { 3777 .call = ctnetlink_del_conntrack, 3778 .type = NFNL_CB_MUTEX, 3779 .attr_count = CTA_MAX, 3780 .policy = ct_nla_policy 3781 }, 3782 [IPCTNL_MSG_CT_GET_CTRZERO] = { 3783 .call = ctnetlink_get_conntrack, 3784 .type = NFNL_CB_MUTEX, 3785 .attr_count = CTA_MAX, 3786 .policy = ct_nla_policy 3787 }, 3788 [IPCTNL_MSG_CT_GET_STATS_CPU] = { 3789 .call = ctnetlink_stat_ct_cpu, 3790 .type = NFNL_CB_MUTEX, 3791 }, 3792 [IPCTNL_MSG_CT_GET_STATS] = { 3793 .call = ctnetlink_stat_ct, 3794 .type = NFNL_CB_MUTEX, 3795 }, 3796 [IPCTNL_MSG_CT_GET_DYING] = { 3797 .call = ctnetlink_get_ct_dying, 3798 .type = NFNL_CB_MUTEX, 3799 }, 3800 [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { 3801 .call = ctnetlink_get_ct_unconfirmed, 3802 .type = NFNL_CB_MUTEX, 3803 }, 3804 }; 3805 3806 static const 
struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { 3807 [IPCTNL_MSG_EXP_GET] = { 3808 .call = ctnetlink_get_expect, 3809 .type = NFNL_CB_MUTEX, 3810 .attr_count = CTA_EXPECT_MAX, 3811 .policy = exp_nla_policy 3812 }, 3813 [IPCTNL_MSG_EXP_NEW] = { 3814 .call = ctnetlink_new_expect, 3815 .type = NFNL_CB_MUTEX, 3816 .attr_count = CTA_EXPECT_MAX, 3817 .policy = exp_nla_policy 3818 }, 3819 [IPCTNL_MSG_EXP_DELETE] = { 3820 .call = ctnetlink_del_expect, 3821 .type = NFNL_CB_MUTEX, 3822 .attr_count = CTA_EXPECT_MAX, 3823 .policy = exp_nla_policy 3824 }, 3825 [IPCTNL_MSG_EXP_GET_STATS_CPU] = { 3826 .call = ctnetlink_stat_exp_cpu, 3827 .type = NFNL_CB_MUTEX, 3828 }, 3829 }; 3830 3831 static const struct nfnetlink_subsystem ctnl_subsys = { 3832 .name = "conntrack", 3833 .subsys_id = NFNL_SUBSYS_CTNETLINK, 3834 .cb_count = IPCTNL_MSG_MAX, 3835 .cb = ctnl_cb, 3836 }; 3837 3838 static const struct nfnetlink_subsystem ctnl_exp_subsys = { 3839 .name = "conntrack_expect", 3840 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP, 3841 .cb_count = IPCTNL_MSG_EXP_MAX, 3842 .cb = ctnl_exp_cb, 3843 }; 3844 3845 MODULE_ALIAS("ip_conntrack_netlink"); 3846 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 3847 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); 3848 3849 static int __net_init ctnetlink_net_init(struct net *net) 3850 { 3851 #ifdef CONFIG_NF_CONNTRACK_EVENTS 3852 nf_conntrack_register_notifier(net, &ctnl_notifier); 3853 #endif 3854 return 0; 3855 } 3856 3857 static void ctnetlink_net_pre_exit(struct net *net) 3858 { 3859 #ifdef CONFIG_NF_CONNTRACK_EVENTS 3860 nf_conntrack_unregister_notifier(net); 3861 #endif 3862 } 3863 3864 static struct pernet_operations ctnetlink_net_ops = { 3865 .init = ctnetlink_net_init, 3866 .pre_exit = ctnetlink_net_pre_exit, 3867 }; 3868 3869 static int __init ctnetlink_init(void) 3870 { 3871 int ret; 3872 3873 NL_ASSERT_CTX_FITS(struct ctnetlink_list_dump_ctx); 3874 3875 ret = nfnetlink_subsys_register(&ctnl_subsys); 3876 if (ret < 0) { 3877 pr_err("ctnetlink_init: cannot register with nfnetlink.\n"); 3878 goto err_out; 3879 } 3880 3881 ret = nfnetlink_subsys_register(&ctnl_exp_subsys); 3882 if (ret < 0) { 3883 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n"); 3884 goto err_unreg_subsys; 3885 } 3886 3887 ret = register_pernet_subsys(&ctnetlink_net_ops); 3888 if (ret < 0) { 3889 pr_err("ctnetlink_init: cannot register pernet operations\n"); 3890 goto err_unreg_exp_subsys; 3891 } 3892 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 3893 /* setup interaction between nf_queue and nf_conntrack_netlink. */ 3894 RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook); 3895 #endif 3896 return 0; 3897 3898 err_unreg_exp_subsys: 3899 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 3900 err_unreg_subsys: 3901 nfnetlink_subsys_unregister(&ctnl_subsys); 3902 err_out: 3903 return ret; 3904 } 3905 3906 static void __exit ctnetlink_exit(void) 3907 { 3908 unregister_pernet_subsys(&ctnetlink_net_ops); 3909 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 3910 nfnetlink_subsys_unregister(&ctnl_subsys); 3911 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 3912 RCU_INIT_POINTER(nfnl_ct_hook, NULL); 3913 #endif 3914 synchronize_rcu(); 3915 } 3916 3917 module_init(ctnetlink_init); 3918 module_exit(ctnetlink_exit); 3919
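/*
 * Illustrative only, not part of the module: a minimal userspace sketch of
 * the request that the IPCTNL_MSG_CT_GET handler above answers with a table
 * dump. It relies solely on the uapi headers; error handling, reply parsing
 * and attribute decoding are omitted.
 *
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/netfilter/nfnetlink.h>
 *	#include <linux/netfilter/nfnetlink_conntrack.h>
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct nfgenmsg nfg;
 *	} req = {
 *		.nlh.nlmsg_len    = NLMSG_LENGTH(sizeof(struct nfgenmsg)),
 *		.nlh.nlmsg_type   = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET,
 *		.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_DUMP,
 *		.nfg.nfgen_family = AF_UNSPEC,
 *		.nfg.version      = NFNETLINK_V0,
 *	};
 *	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NETFILTER);
 *
 *	sendto(fd, &req, req.nlh.nlmsg_len, 0,
 *	       (struct sockaddr *)&sa, sizeof(sa));
 *
 * Subsequent reads return NLM_F_MULTI messages of type
 * (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_NEW, one per conntrack entry,
 * terminated by NLMSG_DONE.
 */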