/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>
#include <linux/sched/signal.h>

#include <net/netlink.h>
#include <net/netns/generic.h>
#include <linux/netfilter/nfnetlink.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
MODULE_DESCRIPTION("Netfilter messages via netlink socket");

#define nfnl_dereference_protected(id) \
	rcu_dereference_protected(table[(id)].subsys, \
				  lockdep_nfnl_is_held((id)))

#define NFNL_MAX_ATTR_COUNT	32

static unsigned int nfnetlink_pernet_id __read_mostly;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static DEFINE_SPINLOCK(nfnl_grp_active_lock);
#endif

struct nfnl_net {
	struct sock *nfnl;
};

static struct {
	struct mutex mutex;
	const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];

static struct lock_class_key nfnl_lockdep_keys[NFNL_SUBSYS_COUNT];

static const char *const nfnl_lockdep_names[NFNL_SUBSYS_COUNT] = {
	[NFNL_SUBSYS_NONE] = "nfnl_subsys_none",
	[NFNL_SUBSYS_CTNETLINK] = "nfnl_subsys_ctnetlink",
	[NFNL_SUBSYS_CTNETLINK_EXP] = "nfnl_subsys_ctnetlink_exp",
	[NFNL_SUBSYS_QUEUE] = "nfnl_subsys_queue",
	[NFNL_SUBSYS_ULOG] = "nfnl_subsys_ulog",
	[NFNL_SUBSYS_OSF] = "nfnl_subsys_osf",
	[NFNL_SUBSYS_IPSET] = "nfnl_subsys_ipset",
	[NFNL_SUBSYS_ACCT] = "nfnl_subsys_acct",
	[NFNL_SUBSYS_CTNETLINK_TIMEOUT] = "nfnl_subsys_cttimeout",
	[NFNL_SUBSYS_CTHELPER] = "nfnl_subsys_cthelper",
	[NFNL_SUBSYS_NFTABLES] = "nfnl_subsys_nftables",
	[NFNL_SUBSYS_NFT_COMPAT] = "nfnl_subsys_nftcompat",
	[NFNL_SUBSYS_HOOK] = "nfnl_subsys_hook",
};

static const int nfnl_group2type[NFNLGRP_MAX+1] = {
	[NFNLGRP_CONNTRACK_NEW]		= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_UPDATE]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_DESTROY]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_EXP_NEW]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_UPDATE]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_DESTROY]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_NFTABLES]		= NFNL_SUBSYS_NFTABLES,
	[NFNLGRP_ACCT_QUOTA]		= NFNL_SUBSYS_ACCT,
	[NFNLGRP_NFTRACE]		= NFNL_SUBSYS_NFTABLES,
};

static struct nfnl_net *nfnl_pernet(struct net *net)
{
	return net_generic(net, nfnetlink_pernet_id);
}

void nfnl_lock(__u8 subsys_id)
{
	mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);

void nfnl_unlock(__u8 subsys_id)
{
	mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);
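
/* Locking usage sketch (illustrative only, not part of this file's build):
 * code outside this file serializes per-subsystem configuration against the
 * NFNL_CB_MUTEX dispatch below by taking the matching table[] mutex through
 * nfnl_lock()/nfnl_unlock().  example_ctnetlink_reconfigure() is a made-up
 * name for the sketch.
 */
#if 0
static void example_ctnetlink_reconfigure(void)
{
	nfnl_lock(NFNL_SUBSYS_CTNETLINK);
	/* mutate state that NFNL_CB_MUTEX callbacks of this subsystem touch */
	nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
}
#endif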

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
	return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif

int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
	u8 cb_id;

	/* Sanity-check attr_count size to avoid stack buffer overflow. */
	for (cb_id = 0; cb_id < n->cb_count; cb_id++)
		if (WARN_ON(n->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT))
			return -EINVAL;

	nfnl_lock(n->subsys_id);
	if (table[n->subsys_id].subsys) {
		nfnl_unlock(n->subsys_id);
		return -EBUSY;
	}
	rcu_assign_pointer(table[n->subsys_id].subsys, n);
	nfnl_unlock(n->subsys_id);

	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);

int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	table[n->subsys_id].subsys = NULL;
	nfnl_unlock(n->subsys_id);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);

static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
	u8 subsys_id = NFNL_SUBSYS_ID(type);

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return NULL;

	return rcu_dereference(table[subsys_id].subsys);
}

static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
	u8 cb_id = NFNL_MSG_TYPE(type);

	if (cb_id >= ss->cb_count)
		return NULL;

	return &ss->cb[cb_id];
}

int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return netlink_has_listeners(nfnlnet->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
		   unsigned int group, int echo, gfp_t flags)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return nlmsg_notify(nfnlnet->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return netlink_set_err(nfnlnet->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);
	int err;

	err = nlmsg_unicast(nfnlnet->nfnl, skb, portid);
	if (err == -EAGAIN)
		err = -ENOBUFS;

	return err;
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid,
			 __u32 group, gfp_t allocation)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	netlink_broadcast(nfnlnet->nfnl, skb, portid, group, allocation);
}
EXPORT_SYMBOL_GPL(nfnetlink_broadcast);
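
/* Registration usage sketch (illustrative only, not part of this file's
 * build): a hypothetical subsystem describes its handlers in a
 * struct nfnetlink_subsystem and passes it to nfnetlink_subsys_register()
 * from its module init path.  NFNL_SUBSYS_EXAMPLE, EXAMPLE_MSG_GET and the
 * example_* names are invented for this sketch; only the struct fields
 * (subsys_id, cb_count, cb, owner, call, type, attr_count, policy) match
 * what the dispatch code in this file actually uses, and the handler
 * prototype follows the nc->call() invocation below as declared in
 * <linux/netfilter/nfnetlink.h>.
 */
#if 0
static int example_get(struct sk_buff *skb, const struct nfnl_info *info,
		       const struct nlattr * const cda[])
{
	return 0;	/* handle one request of message type EXAMPLE_MSG_GET */
}

static const struct nfnl_callback example_cb[] = {
	[EXAMPLE_MSG_GET] = {
		.call		= example_get,
		.type		= NFNL_CB_RCU,	/* run under rcu_read_lock() */
		.attr_count	= 0,	/* must stay <= NFNL_MAX_ATTR_COUNT */
	},
};

static const struct nfnetlink_subsystem example_subsys = {
	.subsys_id	= NFNL_SUBSYS_EXAMPLE,
	.cb_count	= ARRAY_SIZE(example_cb),
	.cb		= example_cb,
	.owner		= THIS_MODULE,	/* referenced by the batch path */
};

static int __init example_init(void)
{
	return nfnetlink_subsys_register(&example_subsys);
}

static void __exit example_exit(void)
{
	nfnetlink_subsys_unregister(&example_subsys);
}
#endif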

/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	const struct nfnl_callback *nc;
	const struct nfnetlink_subsystem *ss;
	int type, err;

	/* All the messages must at least contain nfgenmsg */
	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
		return 0;

	type = nlh->nlmsg_type;
replay:
	rcu_read_lock();

	ss = nfnetlink_get_subsys(type);
	if (!ss) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
		rcu_read_lock();
		ss = nfnetlink_get_subsys(type);
		if (!ss)
#endif
		{
			rcu_read_unlock();
			return -EINVAL;
		}
	}

	nc = nfnetlink_find_client(type, ss);
	if (!nc) {
		rcu_read_unlock();
		return -EINVAL;
	}

	{
		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
		struct nfnl_net *nfnlnet = nfnl_pernet(net);
		u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
		struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
		struct nlattr *attr = (void *)nlh + min_len;
		int attrlen = nlh->nlmsg_len - min_len;
		__u8 subsys_id = NFNL_SUBSYS_ID(type);
		struct nfnl_info info = {
			.net	= net,
			.sk	= nfnlnet->nfnl,
			.nlh	= nlh,
			.nfmsg	= nlmsg_data(nlh),
			.extack	= extack,
		};

		/* Sanity-check NFNL_MAX_ATTR_COUNT */
		if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		err = nla_parse_deprecated(cda, ss->cb[cb_id].attr_count,
					   attr, attrlen,
					   ss->cb[cb_id].policy, extack);
		if (err < 0) {
			rcu_read_unlock();
			return err;
		}

		if (!nc->call) {
			rcu_read_unlock();
			return -EINVAL;
		}

		switch (nc->type) {
		case NFNL_CB_RCU:
			err = nc->call(skb, &info, (const struct nlattr **)cda);
			rcu_read_unlock();
			break;
		case NFNL_CB_MUTEX:
			rcu_read_unlock();
			nfnl_lock(subsys_id);
			if (nfnl_dereference_protected(subsys_id) != ss ||
			    nfnetlink_find_client(type, ss) != nc) {
				/* drop the mutex before replaying, otherwise
				 * the retry would deadlock on nfnl_lock().
				 */
				nfnl_unlock(subsys_id);
				err = -EAGAIN;
				break;
			}
			err = nc->call(skb, &info, (const struct nlattr **)cda);
			nfnl_unlock(subsys_id);
			break;
		default:
			rcu_read_unlock();
			err = -EINVAL;
			break;
		}
		if (err == -EAGAIN)
			goto replay;
		return err;
	}
}
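
/* Wire layout sketch (illustrative only, not part of this file's build):
 * a message accepted by nfnetlink_rcv_msg() above consists of a struct
 * nlmsghdr whose nlmsg_type packs the subsystem and message IDs, followed
 * by a struct nfgenmsg (nfgen_family, version NFNETLINK_V0, res_id) and
 * the netlink attributes parsed against ss->cb[msg_type].policy.  The
 * helper name example_nfnl_msg_type() is made up; it just shows the type
 * encoding that NFNL_SUBSYS_ID()/NFNL_MSG_TYPE() undo on the receive side.
 */
#if 0
static inline u16 example_nfnl_msg_type(u8 subsys_id, u8 msg_type)
{
	return ((u16)subsys_id << 8) | msg_type;
}
#endif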

struct nfnl_err {
	struct list_head head;
	struct nlmsghdr *nlh;
	int err;
	struct netlink_ext_ack extack;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err,
			const struct netlink_ext_ack *extack)
{
	struct nfnl_err *nfnl_err;

	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
	if (nfnl_err == NULL)
		return -ENOMEM;

	nfnl_err->nlh = nlh;
	nfnl_err->err = err;
	nfnl_err->extack = *extack;
	list_add_tail(&nfnl_err->head, list);

	return 0;
}

static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}

static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head)
		nfnl_err_del(nfnl_err);
}

static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
			    &nfnl_err->extack);
		nfnl_err_del(nfnl_err);
	}
}

enum {
	NFNL_BATCH_FAILURE	= (1 << 0),
	NFNL_BATCH_DONE		= (1 << 1),
	NFNL_BATCH_REPLAY	= (1 << 2),
};

static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
				u16 subsys_id, u32 genid)
{
	struct sk_buff *oskb = skb;
	struct net *net = sock_net(skb->sk);
	const struct nfnetlink_subsystem *ss;
	const struct nfnl_callback *nc;
	struct netlink_ext_ack extack;
	LIST_HEAD(err_list);
	u32 status;
	int err;

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return netlink_ack(skb, nlh, -EINVAL, NULL);
replay:
	status = 0;
replay_abort:
	skb = netlink_skb_clone(oskb, GFP_KERNEL);
	if (!skb)
		return netlink_ack(oskb, nlh, -ENOMEM, NULL);

	nfnl_lock(subsys_id);
	ss = nfnl_dereference_protected(subsys_id);
	if (!ss) {
#ifdef CONFIG_MODULES
		nfnl_unlock(subsys_id);
		request_module("nfnetlink-subsys-%d", subsys_id);
		nfnl_lock(subsys_id);
		ss = nfnl_dereference_protected(subsys_id);
		if (!ss)
#endif
		{
			nfnl_unlock(subsys_id);
			netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
			return kfree_skb(skb);
		}
	}

	if (!ss->valid_genid || !ss->commit || !ss->abort) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return kfree_skb(skb);
	}

	if (!try_module_get(ss->owner)) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return kfree_skb(skb);
	}

	if (!ss->valid_genid(net, genid)) {
		module_put(ss->owner);
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -ERESTART, NULL);
		return kfree_skb(skb);
	}

	nfnl_unlock(subsys_id);

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen, type;

		if (fatal_signal_pending(current)) {
			nfnl_err_reset(&err_list);
			err = -EINTR;
			status = NFNL_BATCH_FAILURE;
			goto done;
		}

		memset(&extack, 0, sizeof(extack));
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
		    skb->len < nlh->nlmsg_len ||
		    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		}

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
			err = -EINVAL;
			goto ack;
		}

		type = nlh->nlmsg_type;
		if (type == NFNL_MSG_BATCH_BEGIN) {
			/* Malformed: Batch begin twice */
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		} else if (type == NFNL_MSG_BATCH_END) {
			status |= NFNL_BATCH_DONE;
			goto done;
		} else if (type < NLMSG_MIN_TYPE) {
			err = -EINVAL;
			goto ack;
		}

		/* We only accept a batch with messages for the same
		 * subsystem.
		 */
		if (NFNL_SUBSYS_ID(type) != subsys_id) {
			err = -EINVAL;
			goto ack;
		}

		nc = nfnetlink_find_client(type, ss);
		if (!nc) {
			err = -EINVAL;
			goto ack;
		}

		if (nc->type != NFNL_CB_BATCH) {
			err = -EINVAL;
			goto ack;
		}

		{
			int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
			struct nfnl_net *nfnlnet = nfnl_pernet(net);
			struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
			struct nlattr *attr = (void *)nlh + min_len;
			u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
			int attrlen = nlh->nlmsg_len - min_len;
			struct nfnl_info info = {
				.net	= net,
				.sk	= nfnlnet->nfnl,
				.nlh	= nlh,
				.nfmsg	= nlmsg_data(nlh),
				.extack	= &extack,
			};

			/* Sanity-check NFNL_MAX_ATTR_COUNT */
			if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
				err = -ENOMEM;
				goto ack;
			}

			err = nla_parse_deprecated(cda,
						   ss->cb[cb_id].attr_count,
						   attr, attrlen,
						   ss->cb[cb_id].policy, NULL);
			if (err < 0)
				goto ack;

			err = nc->call(skb, &info, (const struct nlattr **)cda);

			/* The lock was released to autoload some module, we
			 * have to abort and start from scratch using the
			 * original skb.
			 */
			if (err == -EAGAIN) {
				status |= NFNL_BATCH_REPLAY;
				goto done;
			}
		}
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed, this avoids that the same error is
			 * reported several times when replaying the batch.
			 */
			if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
				/* We failed to enqueue an error, reset the
				 * list of errors and send OOM to userspace
				 * pointing to the batch header.
				 */
				nfnl_err_reset(&err_list);
				netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM,
					    NULL);
				status |= NFNL_BATCH_FAILURE;
				goto done;
			}
			/* We don't stop processing the batch on errors, thus,
			 * userspace gets all the errors that the batch
			 * triggers.
			 */
			if (err)
				status |= NFNL_BATCH_FAILURE;
		}

		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}
done:
	if (status & NFNL_BATCH_REPLAY) {
		ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD);
		nfnl_err_reset(&err_list);
		kfree_skb(skb);
		module_put(ss->owner);
		goto replay;
	} else if (status == NFNL_BATCH_DONE) {
		err = ss->commit(net, oskb);
		if (err == -EAGAIN) {
			status |= NFNL_BATCH_REPLAY;
			goto done;
		} else if (err) {
			ss->abort(net, oskb, NFNL_ABORT_NONE);
			netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
		}
	} else {
		enum nfnl_abort_action abort_action;

		if (status & NFNL_BATCH_FAILURE)
			abort_action = NFNL_ABORT_NONE;
		else
			abort_action = NFNL_ABORT_VALIDATE;

		err = ss->abort(net, oskb, abort_action);
		if (err == -EAGAIN) {
			nfnl_err_reset(&err_list);
			kfree_skb(skb);
			module_put(ss->owner);
			status |= NFNL_BATCH_FAILURE;
			goto replay_abort;
		}
	}
	if (ss->cleanup)
		ss->cleanup(net);

	nfnl_err_deliver(&err_list, oskb);
	kfree_skb(skb);
	module_put(ss->owner);
}

static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
	[NFNL_BATCH_GENID]	= { .type = NLA_U32 },
};

static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
	struct nlattr *attr = (void *)nlh + min_len;
	struct nlattr *cda[NFNL_BATCH_MAX + 1];
	int attrlen = nlh->nlmsg_len - min_len;
	struct nfgenmsg *nfgenmsg;
	int msglen, err;
	u32 gen_id = 0;
	u16 res_id;

	msglen = NLMSG_ALIGN(nlh->nlmsg_len);
	if (msglen > skb->len)
		msglen = skb->len;

	if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
		return;

	err = nla_parse_deprecated(cda, NFNL_BATCH_MAX, attr, attrlen,
				   nfnl_batch_policy, NULL);
	if (err < 0) {
		netlink_ack(skb, nlh, err, NULL);
		return;
	}
	if (cda[NFNL_BATCH_GENID])
		gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));

	nfgenmsg = nlmsg_data(nlh);
	skb_pull(skb, msglen);
	/* Work around old nft using host byte order */
	if (nfgenmsg->res_id == (__force __be16)NFNL_SUBSYS_NFTABLES)
		res_id = NFNL_SUBSYS_NFTABLES;
	else
		res_id = ntohs(nfgenmsg->res_id);

	nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}

static void nfnetlink_rcv(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	if (skb->len < NLMSG_HDRLEN ||
	    nlh->nlmsg_len < NLMSG_HDRLEN ||
	    skb->len < nlh->nlmsg_len)
		return;

	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
		netlink_ack(skb, nlh, -EPERM, NULL);
		return;
	}

	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
		nfnetlink_rcv_skb_batch(skb, nlh);
	else
		netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}
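
/* Batch framing sketch (illustrative only, not part of this file's build):
 * a transaction accepted by nfnetlink_rcv()/nfnetlink_rcv_skb_batch() above
 * is an NFNL_MSG_BATCH_BEGIN message whose nfgenmsg.res_id names the target
 * subsystem (optionally carrying an NFNL_BATCH_GENID attribute), followed by
 * NLM_F_REQUEST messages for that subsystem only, and terminated by
 * NFNL_MSG_BATCH_END.  example_fill_batch_begin() is a made-up helper name.
 */
#if 0
static void example_fill_batch_begin(struct nfgenmsg *nfg, u16 subsys_id)
{
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version	  = NFNETLINK_V0;
	nfg->res_id	  = htons(subsys_id);	/* e.g. NFNL_SUBSYS_NFTABLES */
}
#endif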

static void nfnetlink_bind_event(struct net *net, unsigned int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int type, group_bit;
	u8 v;

	/* All NFNLGRP_CONNTRACK_* group bits fit into u8.
	 * The other groups are not relevant and can be ignored.
	 */
	if (group >= 8)
		return;

	type = nfnl_group2type[group];

	switch (type) {
	case NFNL_SUBSYS_CTNETLINK:
		break;
	case NFNL_SUBSYS_CTNETLINK_EXP:
		break;
	default:
		return;
	}

	group_bit = (1 << group);

	spin_lock(&nfnl_grp_active_lock);
	v = READ_ONCE(net->ct.ctnetlink_has_listener);
	if ((v & group_bit) == 0) {
		v |= group_bit;

		/* read concurrently without nfnl_grp_active_lock held. */
		WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
	}

	spin_unlock(&nfnl_grp_active_lock);
#endif
}

static int nfnetlink_bind(struct net *net, int group)
{
	const struct nfnetlink_subsystem *ss;
	int type;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return 0;

	type = nfnl_group2type[group];

	rcu_read_lock();
	ss = nfnetlink_get_subsys(type << 8);
	rcu_read_unlock();
	if (!ss)
		request_module_nowait("nfnetlink-subsys-%d", type);

	nfnetlink_bind_event(net, group);
	return 0;
}

static void nfnetlink_unbind(struct net *net, int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int type, group_bit;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return;

	type = nfnl_group2type[group];

	switch (type) {
	case NFNL_SUBSYS_CTNETLINK:
		break;
	case NFNL_SUBSYS_CTNETLINK_EXP:
		break;
	default:
		return;
	}

	/* ctnetlink_has_listener is u8 */
	if (group >= 8)
		return;

	group_bit = (1 << group);

	spin_lock(&nfnl_grp_active_lock);
	if (!nfnetlink_has_listeners(net, group)) {
		u8 v = READ_ONCE(net->ct.ctnetlink_has_listener);

		v &= ~group_bit;

		/* read concurrently without nfnl_grp_active_lock held. */
		WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
	}
	spin_unlock(&nfnl_grp_active_lock);
#endif
}

static int __net_init nfnetlink_net_init(struct net *net)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);
	struct netlink_kernel_cfg cfg = {
		.groups	= NFNLGRP_MAX,
		.input	= nfnetlink_rcv,
		.bind	= nfnetlink_bind,
		.unbind	= nfnetlink_unbind,
	};

	nfnlnet->nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnlnet->nfnl)
		return -ENOMEM;
	return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
	struct nfnl_net *nfnlnet;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		nfnlnet = nfnl_pernet(net);

		netlink_kernel_release(nfnlnet->nfnl);
	}
}

static struct pernet_operations nfnetlink_net_ops = {
	.init		= nfnetlink_net_init,
	.exit_batch	= nfnetlink_net_exit_batch,
	.id		= &nfnetlink_pernet_id,
	.size		= sizeof(struct nfnl_net),
};

static int __init nfnetlink_init(void)
{
	int i;

	for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
		BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

	for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
		__mutex_init(&table[i].mutex, nfnl_lockdep_names[i],
			     &nfnl_lockdep_keys[i]);

	return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
	unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);
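
/* Userspace subscription sketch (illustrative only, not compiled here):
 * joining an nfnetlink multicast group with NETLINK_ADD_MEMBERSHIP on a
 * NETLINK_NETFILTER socket is what ends up invoking nfnetlink_bind() and,
 * on leave/close, nfnetlink_unbind() above.  SOL_NETLINK and
 * NETLINK_ADD_MEMBERSHIP come from the netlink uapi headers; the function
 * name example_subscribe_conntrack_new() is made up.
 */
#if 0
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>

int example_subscribe_conntrack_new(void)
{
	int grp = NFNLGRP_CONNTRACK_NEW;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NETFILTER);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0)
		return -1;
	return fd;	/* conntrack NEW events are then read with recv() */
}
#endif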