/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>
#include <linux/sched/signal.h>

#include <net/netlink.h>
#include <net/netns/generic.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
MODULE_DESCRIPTION("Netfilter messages via netlink socket");

#define nfnl_dereference_protected(id) \
	rcu_dereference_protected(table[(id)].subsys, \
				  lockdep_nfnl_is_held((id)))

#define NFNL_MAX_ATTR_COUNT	32

static unsigned int nfnetlink_pernet_id __read_mostly;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static DEFINE_SPINLOCK(nfnl_grp_active_lock);
#endif

struct nfnl_net {
	struct sock *nfnl;
};

static struct {
	struct mutex mutex;
	const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];

static struct lock_class_key nfnl_lockdep_keys[NFNL_SUBSYS_COUNT];

static const char *const nfnl_lockdep_names[NFNL_SUBSYS_COUNT] = {
	[NFNL_SUBSYS_NONE] = "nfnl_subsys_none",
	[NFNL_SUBSYS_CTNETLINK] = "nfnl_subsys_ctnetlink",
	[NFNL_SUBSYS_CTNETLINK_EXP] = "nfnl_subsys_ctnetlink_exp",
	[NFNL_SUBSYS_QUEUE] = "nfnl_subsys_queue",
	[NFNL_SUBSYS_ULOG] = "nfnl_subsys_ulog",
	[NFNL_SUBSYS_OSF] = "nfnl_subsys_osf",
	[NFNL_SUBSYS_IPSET] = "nfnl_subsys_ipset",
	[NFNL_SUBSYS_ACCT] = "nfnl_subsys_acct",
	[NFNL_SUBSYS_CTNETLINK_TIMEOUT] = "nfnl_subsys_cttimeout",
	[NFNL_SUBSYS_CTHELPER] = "nfnl_subsys_cthelper",
	[NFNL_SUBSYS_NFTABLES] = "nfnl_subsys_nftables",
	[NFNL_SUBSYS_NFT_COMPAT] = "nfnl_subsys_nftcompat",
	[NFNL_SUBSYS_HOOK] = "nfnl_subsys_hook",
};

static const int nfnl_group2type[NFNLGRP_MAX+1] = {
	[NFNLGRP_CONNTRACK_NEW]		= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_UPDATE]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_DESTROY]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_EXP_NEW]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_UPDATE]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_DESTROY]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_NFTABLES]		= NFNL_SUBSYS_NFTABLES,
	[NFNLGRP_ACCT_QUOTA]		= NFNL_SUBSYS_ACCT,
	[NFNLGRP_NFTRACE]		= NFNL_SUBSYS_NFTABLES,
};

static struct nfnl_net *nfnl_pernet(struct net *net)
{
	return net_generic(net, nfnetlink_pernet_id);
}

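/* Each nfnetlink subsystem gets one slot in table[]: the mutex serializes
 * (un)registration and NFNL_CB_MUTEX callbacks, while the RCU-protected
 * subsys pointer lets the receive path look up handlers without the lock.
 */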
void nfnl_lock(__u8 subsys_id)
{
	mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);

void nfnl_unlock(__u8 subsys_id)
{
	mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
	return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif

int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
	u8 cb_id;

	/* Sanity-check attr_count size to avoid stack buffer overflow. */
	for (cb_id = 0; cb_id < n->cb_count; cb_id++)
		if (WARN_ON(n->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT))
			return -EINVAL;

	nfnl_lock(n->subsys_id);
	if (table[n->subsys_id].subsys) {
		nfnl_unlock(n->subsys_id);
		return -EBUSY;
	}
	rcu_assign_pointer(table[n->subsys_id].subsys, n);
	nfnl_unlock(n->subsys_id);

	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);

int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	table[n->subsys_id].subsys = NULL;
	nfnl_unlock(n->subsys_id);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);

static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
	u8 subsys_id = NFNL_SUBSYS_ID(type);

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return NULL;

	return rcu_dereference(table[subsys_id].subsys);
}

static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
	u8 cb_id = NFNL_MSG_TYPE(type);

	if (cb_id >= ss->cb_count)
		return NULL;

	return &ss->cb[cb_id];
}

int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return netlink_has_listeners(nfnlnet->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
		   unsigned int group, int echo, gfp_t flags)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return nlmsg_notify(nfnlnet->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return netlink_set_err(nfnlnet->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);
	int err;

	err = nlmsg_unicast(nfnlnet->nfnl, skb, portid);
	if (err == -EAGAIN)
		err = -ENOBUFS;

	return err;
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid,
			 __u32 group, gfp_t allocation)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	netlink_broadcast(nfnlnet->nfnl, skb, portid, group, allocation);
}
EXPORT_SYMBOL_GPL(nfnetlink_broadcast);

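/* nlh->nlmsg_type carries both the subsystem and the command:
 * NFNL_SUBSYS_ID() yields the subsystem index used for the table[] lookup,
 * NFNL_MSG_TYPE() the callback index within that subsystem. Depending on
 * nc->type the callback runs under rcu_read_lock() (NFNL_CB_RCU) or under
 * the per-subsystem mutex (NFNL_CB_MUTEX).
 */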
/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	const struct nfnl_callback *nc;
	const struct nfnetlink_subsystem *ss;
	int type, err;

	/* All the messages must at least contain nfgenmsg */
	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
		return 0;

	type = nlh->nlmsg_type;
replay:
	rcu_read_lock();

	ss = nfnetlink_get_subsys(type);
	if (!ss) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
		rcu_read_lock();
		ss = nfnetlink_get_subsys(type);
		if (!ss)
#endif
		{
			rcu_read_unlock();
			return -EINVAL;
		}
	}

	nc = nfnetlink_find_client(type, ss);
	if (!nc) {
		rcu_read_unlock();
		return -EINVAL;
	}

	{
		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
		struct nfnl_net *nfnlnet = nfnl_pernet(net);
		u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
		struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
		struct nlattr *attr = (void *)nlh + min_len;
		int attrlen = nlh->nlmsg_len - min_len;
		__u8 subsys_id = NFNL_SUBSYS_ID(type);
		struct nfnl_info info = {
			.net = net,
			.sk = nfnlnet->nfnl,
			.nlh = nlh,
			.nfmsg = nlmsg_data(nlh),
			.extack = extack,
		};

		/* Sanity-check NFNL_MAX_ATTR_COUNT */
		if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		err = nla_parse_deprecated(cda, ss->cb[cb_id].attr_count,
					   attr, attrlen,
					   ss->cb[cb_id].policy, extack);
		if (err < 0) {
			rcu_read_unlock();
			return err;
		}

		if (!nc->call) {
			rcu_read_unlock();
			return -EINVAL;
		}

		switch (nc->type) {
		case NFNL_CB_RCU:
			err = nc->call(skb, &info, (const struct nlattr **)cda);
			rcu_read_unlock();
			break;
		case NFNL_CB_MUTEX:
			rcu_read_unlock();
			nfnl_lock(subsys_id);
			if (nfnl_dereference_protected(subsys_id) != ss ||
			    nfnetlink_find_client(type, ss) != nc) {
				nfnl_unlock(subsys_id);
				err = -EAGAIN;
				break;
			}
			err = nc->call(skb, &info, (const struct nlattr **)cda);
			nfnl_unlock(subsys_id);
			break;
		default:
			rcu_read_unlock();
			err = -EINVAL;
			break;
		}
		if (err == -EAGAIN)
			goto replay;
		return err;
	}
}

struct nfnl_err {
	struct list_head head;
	struct nlmsghdr *nlh;
	int err;
	struct netlink_ext_ack extack;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err,
			const struct netlink_ext_ack *extack)
{
	struct nfnl_err *nfnl_err;

	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
	if (nfnl_err == NULL)
		return -ENOMEM;

	nfnl_err->nlh = nlh;
	nfnl_err->err = err;
	nfnl_err->extack = *extack;
	list_add_tail(&nfnl_err->head, list);

	return 0;
}

static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}

static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head)
		nfnl_err_del(nfnl_err);
}

static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
			    &nfnl_err->extack);
		nfnl_err_del(nfnl_err);
	}
}

enum {
	NFNL_BATCH_FAILURE	= (1 << 0),
	NFNL_BATCH_DONE		= (1 << 1),
	NFNL_BATCH_REPLAY	= (1 << 2),
};

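/* Batch processing: the original skb is cloned and the clone is consumed
 * message by message. Errors are queued on err_list and delivered only
 * after the whole batch has been walked; NFNL_MSG_BATCH_END triggers the
 * subsystem's ->commit(), anything else ends in ->abort(). An -EAGAIN from
 * a callback (or from ->commit()) replays the batch from the original skb.
 */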
static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
				u16 subsys_id, u32 genid)
{
	struct sk_buff *oskb = skb;
	struct net *net = sock_net(skb->sk);
	const struct nfnetlink_subsystem *ss;
	const struct nfnl_callback *nc;
	struct netlink_ext_ack extack;
	LIST_HEAD(err_list);
	u32 status;
	int err;

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return netlink_ack(skb, nlh, -EINVAL, NULL);
replay:
	status = 0;
replay_abort:
	skb = netlink_skb_clone(oskb, GFP_KERNEL);
	if (!skb)
		return netlink_ack(oskb, nlh, -ENOMEM, NULL);

	nfnl_lock(subsys_id);
	ss = nfnl_dereference_protected(subsys_id);
	if (!ss) {
#ifdef CONFIG_MODULES
		nfnl_unlock(subsys_id);
		request_module("nfnetlink-subsys-%d", subsys_id);
		nfnl_lock(subsys_id);
		ss = nfnl_dereference_protected(subsys_id);
		if (!ss)
#endif
		{
			nfnl_unlock(subsys_id);
			netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
			return kfree_skb(skb);
		}
	}

	if (!ss->valid_genid || !ss->commit || !ss->abort) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return kfree_skb(skb);
	}

	if (!try_module_get(ss->owner)) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return kfree_skb(skb);
	}

	if (!ss->valid_genid(net, genid)) {
		module_put(ss->owner);
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -ERESTART, NULL);
		return kfree_skb(skb);
	}

	nfnl_unlock(subsys_id);

	if (nlh->nlmsg_flags & NLM_F_ACK)
		nfnl_err_add(&err_list, nlh, 0, &extack);

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen, type;

		if (fatal_signal_pending(current)) {
			nfnl_err_reset(&err_list);
			err = -EINTR;
			status = NFNL_BATCH_FAILURE;
			goto done;
		}

		memset(&extack, 0, sizeof(extack));
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
		    skb->len < nlh->nlmsg_len ||
		    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		}

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
			err = -EINVAL;
			goto ack;
		}

		type = nlh->nlmsg_type;
		if (type == NFNL_MSG_BATCH_BEGIN) {
			/* Malformed: Batch begin twice */
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		} else if (type == NFNL_MSG_BATCH_END) {
			status |= NFNL_BATCH_DONE;
			goto done;
		} else if (type < NLMSG_MIN_TYPE) {
			err = -EINVAL;
			goto ack;
		}

		/* We only accept a batch with messages for the same
		 * subsystem.
		 */
		if (NFNL_SUBSYS_ID(type) != subsys_id) {
			err = -EINVAL;
			goto ack;
		}

		nc = nfnetlink_find_client(type, ss);
		if (!nc) {
			err = -EINVAL;
			goto ack;
		}

		if (nc->type != NFNL_CB_BATCH) {
			err = -EINVAL;
			goto ack;
		}

		{
			int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
			struct nfnl_net *nfnlnet = nfnl_pernet(net);
			struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
			struct nlattr *attr = (void *)nlh + min_len;
			u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
			int attrlen = nlh->nlmsg_len - min_len;
			struct nfnl_info info = {
				.net = net,
				.sk = nfnlnet->nfnl,
				.nlh = nlh,
				.nfmsg = nlmsg_data(nlh),
				.extack = &extack,
			};

			/* Sanity-check NFTA_MAX_ATTR */
			if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
				err = -ENOMEM;
				goto ack;
			}

			err = nla_parse_deprecated(cda,
						   ss->cb[cb_id].attr_count,
						   attr, attrlen,
						   ss->cb[cb_id].policy, NULL);
			if (err < 0)
				goto ack;

			err = nc->call(skb, &info, (const struct nlattr **)cda);

			/* The lock was released to autoload some module, we
			 * have to abort and start from scratch using the
			 * original skb.
			 */
			if (err == -EAGAIN) {
				status |= NFNL_BATCH_REPLAY;
				goto done;
			}
		}
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed, this avoids that the same error is
			 * reported several times when replaying the batch.
			 */
			if (err == -ENOMEM ||
			    nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
				/* We failed to enqueue an error, reset the
				 * list of errors and send OOM to userspace
				 * pointing to the batch header.
				 */
				nfnl_err_reset(&err_list);
				netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM,
					    NULL);
				status |= NFNL_BATCH_FAILURE;
				goto done;
			}
			/* We don't stop processing the batch on errors, thus,
			 * userspace gets all the errors that the batch
			 * triggers.
			 */
			if (err)
				status |= NFNL_BATCH_FAILURE;
		}

		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}
done:
	if (status & NFNL_BATCH_REPLAY) {
		ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD);
		nfnl_err_reset(&err_list);
		kfree_skb(skb);
		module_put(ss->owner);
		goto replay;
	} else if (status == NFNL_BATCH_DONE) {
		err = ss->commit(net, oskb);
		if (err == -EAGAIN) {
			status |= NFNL_BATCH_REPLAY;
			goto done;
		} else if (err) {
			ss->abort(net, oskb, NFNL_ABORT_NONE);
			netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
		} else if (nlh->nlmsg_flags & NLM_F_ACK) {
			nfnl_err_add(&err_list, nlh, 0, &extack);
		}
	} else {
		enum nfnl_abort_action abort_action;

		if (status & NFNL_BATCH_FAILURE)
			abort_action = NFNL_ABORT_NONE;
		else
			abort_action = NFNL_ABORT_VALIDATE;

		err = ss->abort(net, oskb, abort_action);
		if (err == -EAGAIN) {
			nfnl_err_reset(&err_list);
			kfree_skb(skb);
			module_put(ss->owner);
			status |= NFNL_BATCH_FAILURE;
			goto replay_abort;
		}
	}

	nfnl_err_deliver(&err_list, oskb);
	kfree_skb(skb);
	module_put(ss->owner);
}

static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
	[NFNL_BATCH_GENID] = { .type = NLA_U32 },
};

static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
	struct nlattr *attr = (void *)nlh + min_len;
	struct nlattr *cda[NFNL_BATCH_MAX + 1];
	int attrlen = nlh->nlmsg_len - min_len;
	struct nfgenmsg *nfgenmsg;
	int msglen, err;
	u32 gen_id = 0;
	u16 res_id;

	msglen = NLMSG_ALIGN(nlh->nlmsg_len);
	if (msglen > skb->len)
		msglen = skb->len;

	if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
		return;

	err = nla_parse_deprecated(cda, NFNL_BATCH_MAX, attr, attrlen,
				   nfnl_batch_policy, NULL);
	if (err < 0) {
		netlink_ack(skb, nlh, err, NULL);
		return;
	}
	if (cda[NFNL_BATCH_GENID])
		gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));

	nfgenmsg = nlmsg_data(nlh);
	skb_pull(skb, msglen);
	/* Work around old nft using host byte order */
	if (nfgenmsg->res_id == (__force __be16)NFNL_SUBSYS_NFTABLES)
		res_id = NFNL_SUBSYS_NFTABLES;
	else
		res_id = ntohs(nfgenmsg->res_id);

	nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}

static void nfnetlink_rcv(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	if (skb->len < NLMSG_HDRLEN ||
	    nlh->nlmsg_len < NLMSG_HDRLEN ||
	    skb->len < nlh->nlmsg_len)
		return;

	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
		netlink_ack(skb, nlh, -EPERM, NULL);
		return;
	}

	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
		nfnetlink_rcv_skb_batch(skb, nlh);
	else
		netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}

static void nfnetlink_bind_event(struct net *net, unsigned int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int type, group_bit;
	u8 v;

	/* All NFNLGRP_CONNTRACK_* group bits fit into u8.
	 * The other groups are not relevant and can be ignored.
	 */
	if (group >= 8)
		return;

	type = nfnl_group2type[group];

	switch (type) {
	case NFNL_SUBSYS_CTNETLINK:
		break;
	case NFNL_SUBSYS_CTNETLINK_EXP:
		break;
	default:
		return;
	}

	group_bit = (1 << group);

	spin_lock(&nfnl_grp_active_lock);
	v = READ_ONCE(nf_ctnetlink_has_listener);
	if ((v & group_bit) == 0) {
		v |= group_bit;

		/* read concurrently without nfnl_grp_active_lock held. */
		WRITE_ONCE(nf_ctnetlink_has_listener, v);
	}

	spin_unlock(&nfnl_grp_active_lock);
#endif
}

static int nfnetlink_bind(struct net *net, int group)
{
	const struct nfnetlink_subsystem *ss;
	int type;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return 0;

	type = nfnl_group2type[group];

	rcu_read_lock();
	ss = nfnetlink_get_subsys(type << 8);
	rcu_read_unlock();
	if (!ss)
		request_module_nowait("nfnetlink-subsys-%d", type);

	nfnetlink_bind_event(net, group);
	return 0;
}

static void nfnetlink_unbind(struct net *net, int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int type, group_bit;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return;

	type = nfnl_group2type[group];

	switch (type) {
	case NFNL_SUBSYS_CTNETLINK:
		break;
	case NFNL_SUBSYS_CTNETLINK_EXP:
		break;
	default:
		return;
	}

	/* ctnetlink_has_listener is u8 */
	if (group >= 8)
		return;

	group_bit = (1 << group);

	spin_lock(&nfnl_grp_active_lock);
	if (!nfnetlink_has_listeners(net, group)) {
		u8 v = READ_ONCE(nf_ctnetlink_has_listener);

		v &= ~group_bit;

		/* read concurrently without nfnl_grp_active_lock held. */
		WRITE_ONCE(nf_ctnetlink_has_listener, v);
	}
	spin_unlock(&nfnl_grp_active_lock);
#endif
}

static int __net_init nfnetlink_net_init(struct net *net)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);
	struct netlink_kernel_cfg cfg = {
		.groups = NFNLGRP_MAX,
		.input = nfnetlink_rcv,
		.bind = nfnetlink_bind,
		.unbind = nfnetlink_unbind,
	};

	nfnlnet->nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnlnet->nfnl)
		return -ENOMEM;
	return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
	struct nfnl_net *nfnlnet;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		nfnlnet = nfnl_pernet(net);

		netlink_kernel_release(nfnlnet->nfnl);
	}
}

static struct pernet_operations nfnetlink_net_ops = {
	.init		= nfnetlink_net_init,
	.exit_batch	= nfnetlink_net_exit_batch,
	.id		= &nfnetlink_pernet_id,
	.size		= sizeof(struct nfnl_net),
};

static int __init nfnetlink_init(void)
{
	int i;

	for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
		BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

	for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
		__mutex_init(&table[i].mutex, nfnl_lockdep_names[i], &nfnl_lockdep_keys[i]);

	return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
	unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);