// SPDX-License-Identifier: GPL-2.0
/*
 * NETLINK	Generic Netlink Family
 *
 *		Authors:	Jamal Hadi Salim
 *				Thomas Graf <tgraf@suug.ch>
 *				Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string_helpers.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#include <net/sock.h>
#include <net/genetlink.h>

static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);

atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);

void genl_lock(void)
{
	mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);

void genl_unlock(void)
{
	mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);

static void genl_lock_all(void)
{
	down_write(&cb_lock);
	genl_lock();
}

static void genl_unlock_all(void)
{
	genl_unlock();
	up_write(&cb_lock);
}

static void genl_op_lock(const struct genl_family *family)
{
	if (!family->parallel_ops)
		genl_lock();
}

static void genl_op_unlock(const struct genl_family *family)
{
	if (!family->parallel_ops)
		genl_unlock();
}

static DEFINE_IDR(genl_fam_idr);

/*
 * Bitmap of multicast groups that are currently in use.
 *
 * To avoid an allocation at boot of just one unsigned long,
 * declare it global instead.
 * Bit 0 is marked as already used since group 0 is invalid.
 * Bit 1 is marked as already used since the drop-monitor code
 * abuses the API and thinks it can statically use group 1.
 * That group will typically conflict with other groups that
 * any proper users use.
 * Bit 16 is marked as used since it's used for generic netlink
 * and the code no longer marks pre-reserved IDs as used.
 * Bit 17 is marked as already used since the VFS quota code
 * also abused this API and relied on family == group ID, we
 * cater to that by giving it a static family and group ID.
 * Bit 18 is marked as already used since the PMCRAID driver
 * did the same thing as the VFS quota code (maybe copied?)
 */

static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
				      BIT(GENL_ID_VFS_DQUOT) |
				      BIT(GENL_ID_PMCRAID);
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;

/* We need the last attribute with non-zero ID therefore a 2-entry array */
static struct nla_policy genl_policy_reject_all[] = {
	{ .type = NLA_REJECT },
	{ .type = NLA_REJECT },
};

static int genl_ctrl_event(int event, const struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);

static void
genl_op_fill_in_reject_policy(const struct genl_family *family,
			      struct genl_ops *op)
{
	BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);

	if (op->policy || op->cmd < family->resv_start_op)
		return;

	op->policy = genl_policy_reject_all;
	op->maxattr = 1;
}

static void
genl_op_fill_in_reject_policy_split(const struct genl_family *family,
				    struct genl_split_ops *op)
{
	if (op->policy)
		return;

	op->policy = genl_policy_reject_all;
	op->maxattr = 1;
}

static const struct genl_family *genl_family_find_byid(unsigned int id)
{
	return idr_find(&genl_fam_idr, id);
}

static const struct genl_family *genl_family_find_byname(char *name)
{
	const struct genl_family *family;
	unsigned int id;

	idr_for_each_entry(&genl_fam_idr, family, id)
		if (strcmp(family->name, name) == 0)
			return family;

	return NULL;
}

struct genl_op_iter {
	const struct genl_family *family;
	struct genl_split_ops doit;
	struct genl_split_ops dumpit;
	int cmd_idx;
	int entry_idx;
	u32 cmd;
	u8 flags;
};

static void genl_op_from_full(const struct genl_family *family,
			      unsigned int i, struct genl_ops *op)
{
	*op = family->ops[i];

	if (!op->maxattr)
		op->maxattr = family->maxattr;
	if (!op->policy)
		op->policy = family->policy;

	genl_op_fill_in_reject_policy(family, op);
}

static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
			     struct genl_ops *op)
{
	int i;

	for (i = 0; i < family->n_ops; i++)
		if (family->ops[i].cmd == cmd) {
			genl_op_from_full(family, i, op);
			return 0;
		}

	return -ENOENT;
}

static void genl_op_from_small(const struct genl_family *family,
			       unsigned int i, struct genl_ops *op)
{
	memset(op, 0, sizeof(*op));
	op->doit = family->small_ops[i].doit;
	op->dumpit = family->small_ops[i].dumpit;
	op->cmd = family->small_ops[i].cmd;
	op->internal_flags = family->small_ops[i].internal_flags;
	op->flags = family->small_ops[i].flags;
	op->validate = family->small_ops[i].validate;

	op->maxattr = family->maxattr;
	op->policy = family->policy;

	genl_op_fill_in_reject_policy(family, op);
}

static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
			      struct genl_ops *op)
{
	int i;

	for (i = 0; i < family->n_small_ops; i++)
		if (family->small_ops[i].cmd == cmd) {
			genl_op_from_small(family, i, op);
			return 0;
		}

	return -ENOENT;
}

static void genl_op_from_split(struct genl_op_iter *iter)
{
	const struct genl_family *family = iter->family;
	int i, cnt = 0;

	i = iter->entry_idx - family->n_ops - family->n_small_ops;

	if (family->split_ops[i + cnt].flags & GENL_CMD_CAP_DO) {
		iter->doit = family->split_ops[i + cnt];
		genl_op_fill_in_reject_policy_split(family, &iter->doit);
		cnt++;
	} else {
		memset(&iter->doit, 0, sizeof(iter->doit));
	}

	if (i + cnt < family->n_split_ops &&
	    family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP &&
	    (!cnt || family->split_ops[i + cnt].cmd == iter->doit.cmd)) {
		iter->dumpit = family->split_ops[i + cnt];
		genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
		cnt++;
	} else {
		memset(&iter->dumpit, 0, sizeof(iter->dumpit));
	}

	WARN_ON(!cnt);
	iter->entry_idx += cnt;
}

static int
genl_get_cmd_split(u32 cmd, u8 flag, const struct genl_family *family,
		   struct genl_split_ops *op)
{
	int i;

	for (i = 0; i < family->n_split_ops; i++)
		if (family->split_ops[i].cmd == cmd &&
		    family->split_ops[i].flags & flag) {
			*op = family->split_ops[i];
			return 0;
		}

	return -ENOENT;
}

static int
genl_cmd_full_to_split(struct genl_split_ops *op,
		       const struct genl_family *family,
		       const struct genl_ops *full, u8 flags)
{
	if ((flags & GENL_CMD_CAP_DO && !full->doit) ||
	    (flags & GENL_CMD_CAP_DUMP && !full->dumpit)) {
		memset(op, 0, sizeof(*op));
		return -ENOENT;
	}

	if (flags & GENL_CMD_CAP_DUMP) {
		op->start = full->start;
		op->dumpit = full->dumpit;
		op->done = full->done;
	} else {
		op->pre_doit = family->pre_doit;
		op->doit = full->doit;
		op->post_doit = family->post_doit;
	}

	if (flags & GENL_CMD_CAP_DUMP &&
	    full->validate & GENL_DONT_VALIDATE_DUMP) {
		op->policy = NULL;
		op->maxattr = 0;
	} else {
		op->policy = full->policy;
		op->maxattr = full->maxattr;
	}

	op->cmd = full->cmd;
	op->internal_flags = full->internal_flags;
	op->flags = full->flags;
	op->validate = full->validate;

	/* Make sure flags include the GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP */
	op->flags |= flags;

	return 0;
}

/* Must make sure that op is initialized to 0 on failure */
static int
genl_get_cmd(u32 cmd, u8 flags, const struct genl_family *family,
	     struct genl_split_ops *op)
{
	struct genl_ops full;
	int err;

	err = genl_get_cmd_full(cmd, family, &full);
	if (err == -ENOENT)
		err = genl_get_cmd_small(cmd, family, &full);
	/* Found one of legacy forms */
	if (err == 0)
		return genl_cmd_full_to_split(op, family, &full, flags);

	err = genl_get_cmd_split(cmd, flags, family, op);
	if (err)
		memset(op, 0, sizeof(*op));
	return err;
}

/* For policy dumping only, get ops of both do and dump.
 * Fail if both are missing, genl_get_cmd() will zero-init in case of failure.
 */
static int
genl_get_cmd_both(u32 cmd, const struct genl_family *family,
		  struct genl_split_ops *doit, struct genl_split_ops *dumpit)
{
	int err1, err2;

	err1 = genl_get_cmd(cmd, GENL_CMD_CAP_DO, family, doit);
	err2 = genl_get_cmd(cmd, GENL_CMD_CAP_DUMP, family, dumpit);

	return err1 && err2 ? -ENOENT : 0;
}

static bool
genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
{
	iter->family = family;
	iter->cmd_idx = 0;
	iter->entry_idx = 0;

	iter->flags = 0;

	return iter->family->n_ops +
	       iter->family->n_small_ops +
	       iter->family->n_split_ops;
}

static bool genl_op_iter_next(struct genl_op_iter *iter)
{
	const struct genl_family *family = iter->family;
	bool legacy_op = true;
	struct genl_ops op;

	if (iter->entry_idx < family->n_ops) {
		genl_op_from_full(family, iter->entry_idx, &op);
	} else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
		genl_op_from_small(family, iter->entry_idx - family->n_ops,
				   &op);
	} else if (iter->entry_idx <
		   family->n_ops + family->n_small_ops + family->n_split_ops) {
		legacy_op = false;
		/* updates entry_idx */
		genl_op_from_split(iter);
	} else {
		return false;
	}

	iter->cmd_idx++;

	if (legacy_op) {
		iter->entry_idx++;

		genl_cmd_full_to_split(&iter->doit, family,
				       &op, GENL_CMD_CAP_DO);
		genl_cmd_full_to_split(&iter->dumpit, family,
				       &op, GENL_CMD_CAP_DUMP);
	}

	iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
	iter->flags = iter->doit.flags | iter->dumpit.flags;

	return true;
}

static void
genl_op_iter_copy(struct genl_op_iter *dst, struct genl_op_iter *src)
{
	*dst = *src;
}

static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
{
	return iter->cmd_idx;
}

static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
	unsigned long *new_groups;
	int start = 0;
	int i;
	int id;
	bool fits;

	do {
		if (start == 0)
			id = find_first_zero_bit(mc_groups,
						 mc_groups_longs *
						 BITS_PER_LONG);
		else
			id = find_next_zero_bit(mc_groups,
						mc_groups_longs * BITS_PER_LONG,
						start);

		fits = true;
		for (i = id;
		     i < min_t(int, id + n_groups,
			       mc_groups_longs * BITS_PER_LONG);
		     i++) {
			if (test_bit(i, mc_groups)) {
				start = i;
				fits = false;
				break;
			}
		}

		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
			unsigned long new_longs = mc_groups_longs +
						  BITS_TO_LONGS(n_groups);
			size_t nlen = new_longs * sizeof(unsigned long);

			if (mc_groups == &mc_group_start) {
				new_groups = kzalloc(nlen, GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				*mc_groups = mc_group_start;
			} else {
				new_groups = krealloc(mc_groups, nlen,
						      GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
					mc_groups[mc_groups_longs + i] = 0;
			}
			mc_groups_longs = new_longs;
		}
	} while (!fits);

	for (i = id; i < id + n_groups; i++)
		set_bit(i, mc_groups);
	*first_id = id;
	return 0;
}

static struct genl_family genl_ctrl;

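/*
 * Example (hypothetical identifiers): a family typically declares its
 * multicast groups as a static array of named entries and points its
 * ->mcgrps / ->n_mcgrps members at it, e.g.:
 *
 *	static const struct genl_multicast_group hypothetical_mcgrps[] = {
 *		{ .name = "events", },
 *	};
 *
 * The validation below requires each name to be non-empty and
 * NUL-terminated within GENL_NAMSIZ; the group ID that userspace sees
 * is family->mcgrp_offset + the array index.
 */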
static int genl_validate_assign_mc_groups(struct genl_family *family)
{
	int first_id;
	int n_groups = family->n_mcgrps;
	int err = 0, i;
	bool groups_allocated = false;

	if (!n_groups)
		return 0;

	for (i = 0; i < n_groups; i++) {
		const struct genl_multicast_group *grp = &family->mcgrps[i];

		if (WARN_ON(grp->name[0] == '\0'))
			return -EINVAL;
		if (WARN_ON(!string_is_terminated(grp->name, GENL_NAMSIZ)))
			return -EINVAL;
	}

	/* special-case our own group and hacks */
	if (family == &genl_ctrl) {
		first_id = GENL_ID_CTRL;
		BUG_ON(n_groups != 1);
	} else if (strcmp(family->name, "NET_DM") == 0) {
		first_id = 1;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_VFS_DQUOT) {
		first_id = GENL_ID_VFS_DQUOT;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_PMCRAID) {
		first_id = GENL_ID_PMCRAID;
		BUG_ON(n_groups != 1);
	} else {
		groups_allocated = true;
		err = genl_allocate_reserve_groups(n_groups, &first_id);
		if (err)
			return err;
	}

	family->mcgrp_offset = first_id;

	/* if still initializing, can't and don't need to realloc bitmaps */
	if (!init_net.genl_sock)
		return 0;

	if (family->netnsok) {
		struct net *net;

		netlink_table_grab();
		rcu_read_lock();
		for_each_net_rcu(net) {
			err = __netlink_change_ngroups(net->genl_sock,
					mc_groups_longs * BITS_PER_LONG);
			if (err) {
				/*
				 * No need to roll back, can only fail if
				 * memory allocation fails and then the
				 * number of _possible_ groups has been
				 * increased on some sockets which is ok.
				 */
				break;
			}
		}
		rcu_read_unlock();
		netlink_table_ungrab();
	} else {
		err = netlink_change_ngroups(init_net.genl_sock,
					     mc_groups_longs * BITS_PER_LONG);
	}

	if (groups_allocated && err) {
		for (i = 0; i < family->n_mcgrps; i++)
			clear_bit(family->mcgrp_offset + i, mc_groups);
	}

	return err;
}

static void genl_unregister_mc_groups(const struct genl_family *family)
{
	struct net *net;
	int i;

	netlink_table_grab();
	rcu_read_lock();
	for_each_net_rcu(net) {
		for (i = 0; i < family->n_mcgrps; i++)
			__netlink_clear_multicast_users(
				net->genl_sock, family->mcgrp_offset + i);
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	for (i = 0; i < family->n_mcgrps; i++) {
		int grp_id = family->mcgrp_offset + i;

		if (grp_id != 1)
			clear_bit(grp_id, mc_groups);
		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
				&family->mcgrps[i], grp_id);
	}
}

static bool genl_split_op_check(const struct genl_split_ops *op)
{
	if (WARN_ON(hweight8(op->flags & (GENL_CMD_CAP_DO |
					  GENL_CMD_CAP_DUMP)) != 1))
		return true;
	return false;
}

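/*
 * Example (hypothetical identifiers): genl_validate_ops() below requires
 * split_ops arrays to be sorted by cmd, with every entry implementing
 * exactly one of do/dump and, when a command has both, the do entry
 * preceding the dump entry, roughly like:
 *
 *	static const struct genl_split_ops hypothetical_ops[] = {
 *		{
 *			.cmd	= 1,
 *			.doit	= hypothetical_cmd1_doit,
 *			.flags	= GENL_CMD_CAP_DO,
 *		},
 *		{
 *			.cmd	= 1,
 *			.dumpit	= hypothetical_cmd1_dumpit,
 *			.flags	= GENL_CMD_CAP_DUMP,
 *		},
 *		{
 *			.cmd	= 2,
 *			.doit	= hypothetical_cmd2_doit,
 *			.flags	= GENL_CMD_CAP_DO | GENL_ADMIN_PERM,
 *		},
 *	};
 */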
static int genl_validate_ops(const struct genl_family *family)
{
	struct genl_op_iter i, j;
	unsigned int s;

	if (WARN_ON(family->n_ops && !family->ops) ||
	    WARN_ON(family->n_small_ops && !family->small_ops) ||
	    WARN_ON(family->n_split_ops && !family->split_ops))
		return -EINVAL;

	for (genl_op_iter_init(family, &i); genl_op_iter_next(&i); ) {
		if (!(i.flags & (GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP)))
			return -EINVAL;

		if (WARN_ON(i.cmd >= family->resv_start_op &&
			    (i.doit.validate || i.dumpit.validate)))
			return -EINVAL;

		genl_op_iter_copy(&j, &i);
		while (genl_op_iter_next(&j)) {
			if (i.cmd == j.cmd)
				return -EINVAL;
		}
	}

	if (family->n_split_ops) {
		if (genl_split_op_check(&family->split_ops[0]))
			return -EINVAL;
	}

	for (s = 1; s < family->n_split_ops; s++) {
		const struct genl_split_ops *a, *b;

		a = &family->split_ops[s - 1];
		b = &family->split_ops[s];

		if (genl_split_op_check(b))
			return -EINVAL;

		/* Check sort order */
		if (a->cmd < b->cmd) {
			continue;
		} else if (a->cmd > b->cmd) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (a->internal_flags != b->internal_flags ||
		    ((a->flags ^ b->flags) & ~(GENL_CMD_CAP_DO |
					       GENL_CMD_CAP_DUMP))) {
			WARN_ON(1);
			return -EINVAL;
		}

		if ((a->flags & GENL_CMD_CAP_DO) &&
		    (b->flags & GENL_CMD_CAP_DUMP))
			continue;

		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

static void *genl_sk_priv_alloc(struct genl_family *family)
{
	void *priv;

	priv = kzalloc(family->sock_priv_size, GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	if (family->sock_priv_init)
		family->sock_priv_init(priv);

	return priv;
}

static void genl_sk_priv_free(const struct genl_family *family, void *priv)
{
	if (family->sock_priv_destroy)
		family->sock_priv_destroy(priv);
	kfree(priv);
}

static int genl_sk_privs_alloc(struct genl_family *family)
{
	if (!family->sock_priv_size)
		return 0;

	family->sock_privs = kzalloc(sizeof(*family->sock_privs), GFP_KERNEL);
	if (!family->sock_privs)
		return -ENOMEM;
	xa_init(family->sock_privs);
	return 0;
}

static void genl_sk_privs_free(const struct genl_family *family)
{
	unsigned long id;
	void *priv;

	if (!family->sock_priv_size)
		return;

	xa_for_each(family->sock_privs, id, priv)
		genl_sk_priv_free(family, priv);

	xa_destroy(family->sock_privs);
	kfree(family->sock_privs);
}

static void genl_sk_priv_free_by_sock(struct genl_family *family,
				      struct sock *sk)
{
	void *priv;

	if (!family->sock_priv_size)
		return;
	priv = xa_erase(family->sock_privs, (unsigned long) sk);
	if (!priv)
		return;
	genl_sk_priv_free(family, priv);
}

static void genl_release(struct sock *sk, unsigned long *groups)
{
	struct genl_family *family;
	unsigned int id;

	down_read(&cb_lock);

	idr_for_each_entry(&genl_fam_idr, family, id)
		genl_sk_priv_free_by_sock(family, sk);

	up_read(&cb_lock);
}

/**
 * __genl_sk_priv_get - Get family private pointer for socket, if exists
 *
 * @family: family
 * @sk: socket
 *
 * Lookup a private memory for a Generic netlink family and specified socket.
 *
 * Caller should make sure this is called in RCU read locked section.
 *
 * Return: valid pointer on success, otherwise negative error value
 * encoded by ERR_PTR(), NULL in case priv does not exist.
 */
void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk)
{
	if (WARN_ON_ONCE(!family->sock_privs))
		return ERR_PTR(-EINVAL);
	return xa_load(family->sock_privs, (unsigned long) sk);
}

/**
 * genl_sk_priv_get - Get family private pointer for socket
 *
 * @family: family
 * @sk: socket
 *
 * Lookup a private memory for a Generic netlink family and specified socket.
 * Allocate the private memory in case it was not already done.
 *
 * Return: valid pointer on success, otherwise negative error value
 * encoded by ERR_PTR().
 */
void *genl_sk_priv_get(struct genl_family *family, struct sock *sk)
{
	void *priv, *old_priv;

	priv = __genl_sk_priv_get(family, sk);
	if (priv)
		return priv;

	/* priv for the family does not exist so far, create it. */
	priv = genl_sk_priv_alloc(family);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	old_priv = xa_cmpxchg(family->sock_privs, (unsigned long) sk, NULL,
			      priv, GFP_KERNEL);
	if (old_priv) {
		genl_sk_priv_free(family, priv);
		if (xa_is_err(old_priv))
			return ERR_PTR(xa_err(old_priv));
		/* Race happened, priv for the socket was already inserted. */
		return old_priv;
	}
	return priv;
}

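/*
 * Example (hypothetical identifiers): a family that sets
 * ->sock_priv_size before registration (and optionally
 * ->sock_priv_init / ->sock_priv_destroy) can keep per-socket state
 * from a doit handler; the requesting socket is assumed to be reachable
 * via NETLINK_CB(skb).sk, and the storage lives until that socket is
 * released (see genl_release() above):
 *
 *	static int hypothetical_doit(struct sk_buff *skb,
 *				     struct genl_info *info)
 *	{
 *		struct hypothetical_sk_priv *priv;
 *
 *		priv = genl_sk_priv_get(&hypothetical_family,
 *					NETLINK_CB(skb).sk);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		return 0;
 *	}
 */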
/**
 * genl_register_family - register a generic netlink family
 * @family: generic netlink family
 *
 * Registers the specified family after validating it first. Only one
 * family may be registered with the same family name or identifier.
 *
 * The family's ops, multicast groups and module pointer must already
 * be assigned.
 *
 * Return 0 on success or a negative error code.
 */
int genl_register_family(struct genl_family *family)
{
	int err, i;
	int start = GENL_START_ALLOC, end = GENL_MAX_ID;

	err = genl_validate_ops(family);
	if (err)
		return err;

	genl_lock_all();

	if (genl_family_find_byname(family->name)) {
		err = -EEXIST;
		goto errout_locked;
	}

	err = genl_sk_privs_alloc(family);
	if (err)
		goto errout_locked;

	/*
	 * Sadly, a few cases need to be special-cased
	 * due to them having previously abused the API
	 * and having used their family ID also as their
	 * multicast group ID, so we use reserved IDs
	 * for both to be sure we can do that mapping.
	 */
	if (family == &genl_ctrl) {
		/* and this needs to be special for initial family lookups */
		start = end = GENL_ID_CTRL;
	} else if (strcmp(family->name, "pmcraid") == 0) {
		start = end = GENL_ID_PMCRAID;
	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
		start = end = GENL_ID_VFS_DQUOT;
	}

	family->id = idr_alloc_cyclic(&genl_fam_idr, family,
				      start, end + 1, GFP_KERNEL);
	if (family->id < 0) {
		err = family->id;
		goto errout_sk_privs_free;
	}

	err = genl_validate_assign_mc_groups(family);
	if (err)
		goto errout_remove;

	genl_unlock_all();

	/* send all events */
	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
	for (i = 0; i < family->n_mcgrps; i++)
		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
				&family->mcgrps[i], family->mcgrp_offset + i);

	return 0;

errout_remove:
	idr_remove(&genl_fam_idr, family->id);
errout_sk_privs_free:
	genl_sk_privs_free(family);
errout_locked:
	genl_unlock_all();
	return err;
}
EXPORT_SYMBOL(genl_register_family);

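/*
 * Example (hypothetical identifiers): a minimal registration sketch.
 * The attribute/command constants and handlers are assumptions, not
 * part of this file; commands at or above .resv_start_op get the
 * stricter header and policy handling implemented above/below.
 *
 *	static const struct nla_policy
 *	hypothetical_policy[HYPOTHETICAL_ATTR_MAX + 1] = {
 *		[HYPOTHETICAL_ATTR_VAL] = { .type = NLA_U32 },
 *	};
 *
 *	static const struct genl_small_ops hypothetical_small_ops[] = {
 *		{
 *			.cmd	= HYPOTHETICAL_CMD_GET,
 *			.doit	= hypothetical_get_doit,
 *		},
 *	};
 *
 *	static struct genl_family hypothetical_family __ro_after_init = {
 *		.name		= "hypothetical",
 *		.version	= 1,
 *		.maxattr	= HYPOTHETICAL_ATTR_MAX,
 *		.policy		= hypothetical_policy,
 *		.module		= THIS_MODULE,
 *		.small_ops	= hypothetical_small_ops,
 *		.n_small_ops	= ARRAY_SIZE(hypothetical_small_ops),
 *		.resv_start_op	= HYPOTHETICAL_CMD_GET + 1,
 *	};
 *
 * followed by genl_register_family(&hypothetical_family) from module
 * init and genl_unregister_family(&hypothetical_family) from module
 * exit.
 */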
/**
 * genl_unregister_family - unregister generic netlink family
 * @family: generic netlink family
 *
 * Unregisters the specified family.
 *
 * Returns 0 on success or a negative error code.
 */
int genl_unregister_family(const struct genl_family *family)
{
	genl_lock_all();

	if (!genl_family_find_byid(family->id)) {
		genl_unlock_all();
		return -ENOENT;
	}

	genl_unregister_mc_groups(family);

	idr_remove(&genl_fam_idr, family->id);

	up_write(&cb_lock);
	wait_event(genl_sk_destructing_waitq,
		   atomic_read(&genl_sk_destructing_cnt) == 0);

	genl_sk_privs_free(family);

	genl_unlock();

	genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);

	return 0;
}
EXPORT_SYMBOL(genl_unregister_family);

/**
 * genlmsg_put - Add generic netlink header to netlink message
 * @skb: socket buffer holding the message
 * @portid: netlink portid the message is addressed to
 * @seq: sequence number (usually the one of the sender)
 * @family: generic netlink family
 * @flags: netlink message flags
 * @cmd: generic netlink command
 *
 * Returns pointer to user specific header
 */
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
		  const struct genl_family *family, int flags, u8 cmd)
{
	struct nlmsghdr *nlh;
	struct genlmsghdr *hdr;

	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
			family->hdrsize, flags);
	if (nlh == NULL)
		return NULL;

	hdr = nlmsg_data(nlh);
	hdr->cmd = cmd;
	hdr->version = family->version;
	hdr->reserved = 0;

	return (char *) hdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);

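/*
 * Example (hypothetical identifiers): typical doit-side reply
 * construction around genlmsg_put(), assuming the family, command and
 * attribute constants from the registration sketch above:
 *
 *	struct sk_buff *msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	void *hdr;
 *
 *	if (!msg)
 *		return -ENOMEM;
 *	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
 *			  &hypothetical_family, 0, HYPOTHETICAL_CMD_GET);
 *	if (!hdr || nla_put_u32(msg, HYPOTHETICAL_ATTR_VAL, val)) {
 *		nlmsg_free(msg);
 *		return -EMSGSIZE;
 *	}
 *	genlmsg_end(msg, hdr);
 *	return genlmsg_reply(msg, info);
 */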
static struct genl_dumpit_info *genl_dumpit_info_alloc(void)
{
	return kmalloc(sizeof(struct genl_dumpit_info), GFP_KERNEL);
}

static void genl_dumpit_info_free(const struct genl_dumpit_info *info)
{
	kfree(info);
}

static struct nlattr **
genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
				struct nlmsghdr *nlh,
				struct netlink_ext_ack *extack,
				const struct genl_split_ops *ops,
				int hdrlen,
				enum genl_validate_flags no_strict_flag)
{
	enum netlink_validation validate = ops->validate & no_strict_flag ?
					   NL_VALIDATE_LIBERAL :
					   NL_VALIDATE_STRICT;
	struct nlattr **attrbuf;
	int err;

	if (!ops->maxattr)
		return NULL;

	attrbuf = kmalloc_array(ops->maxattr + 1,
				sizeof(struct nlattr *), GFP_KERNEL);
	if (!attrbuf)
		return ERR_PTR(-ENOMEM);

	err = __nlmsg_parse(nlh, hdrlen, attrbuf, ops->maxattr, ops->policy,
			    validate, extack);
	if (err) {
		kfree(attrbuf);
		return ERR_PTR(err);
	}
	return attrbuf;
}

static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
{
	kfree(attrbuf);
}

struct genl_start_context {
	const struct genl_family *family;
	struct nlmsghdr *nlh;
	struct netlink_ext_ack *extack;
	const struct genl_split_ops *ops;
	int hdrlen;
};

static int genl_start(struct netlink_callback *cb)
{
	struct genl_start_context *ctx = cb->data;
	const struct genl_split_ops *ops;
	struct genl_dumpit_info *info;
	struct nlattr **attrs = NULL;
	int rc = 0;

	ops = ctx->ops;
	if (!(ops->validate & GENL_DONT_VALIDATE_DUMP) &&
	    ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
		return -EINVAL;

	attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
						ops, ctx->hdrlen,
						GENL_DONT_VALIDATE_DUMP_STRICT);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	info = genl_dumpit_info_alloc();
	if (!info) {
		genl_family_rcv_msg_attrs_free(attrs);
		return -ENOMEM;
	}
	info->op = *ops;
	info->info.family = ctx->family;
	info->info.snd_seq = cb->nlh->nlmsg_seq;
	info->info.snd_portid = NETLINK_CB(cb->skb).portid;
	info->info.nlhdr = cb->nlh;
	info->info.genlhdr = nlmsg_data(cb->nlh);
	info->info.attrs = attrs;
	genl_info_net_set(&info->info, sock_net(cb->skb->sk));
	info->info.extack = cb->extack;
	memset(&info->info.user_ptr, 0, sizeof(info->info.user_ptr));

	cb->data = info;
	if (ops->start) {
		genl_op_lock(ctx->family);
		rc = ops->start(cb);
		genl_op_unlock(ctx->family);
	}

	if (rc) {
		genl_family_rcv_msg_attrs_free(info->info.attrs);
		genl_dumpit_info_free(info);
		cb->data = NULL;
	}
	return rc;
}

static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct genl_dumpit_info *dump_info = cb->data;
	const struct genl_split_ops *ops = &dump_info->op;
	struct genl_info *info = &dump_info->info;
	int rc;

	info->extack = cb->extack;

	genl_op_lock(info->family);
	rc = ops->dumpit(skb, cb);
	genl_op_unlock(info->family);
	return rc;
}

static int genl_done(struct netlink_callback *cb)
{
	struct genl_dumpit_info *dump_info = cb->data;
	const struct genl_split_ops *ops = &dump_info->op;
	struct genl_info *info = &dump_info->info;
	int rc = 0;

	info->extack = cb->extack;

	if (ops->done) {
		genl_op_lock(info->family);
		rc = ops->done(cb);
		genl_op_unlock(info->family);
	}
	genl_family_rcv_msg_attrs_free(info->attrs);
	genl_dumpit_info_free(dump_info);
	return rc;
}

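/*
 * Example (hypothetical identifiers): a family dumpit handler runs with
 * cb->data pointing at the genl_dumpit_info prepared in genl_start()
 * above, so the attributes parsed from the request can be reached via
 * genl_dumpit_info():
 *
 *	static int hypothetical_dumpit(struct sk_buff *skb,
 *				       struct netlink_callback *cb)
 *	{
 *		const struct genl_dumpit_info *info = genl_dumpit_info(cb);
 *		struct nlattr **attrs = info->info.attrs;
 *
 *		return hypothetical_fill(skb, cb, attrs);
 *	}
 *
 * where hypothetical_fill() would append as many objects as fit and use
 * cb->args[] to remember where to resume on the next call.
 */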
static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
				      struct sk_buff *skb,
				      struct nlmsghdr *nlh,
				      struct netlink_ext_ack *extack,
				      const struct genl_split_ops *ops,
				      int hdrlen, struct net *net)
{
	struct genl_start_context ctx;
	struct netlink_dump_control c = {
		.module = family->module,
		.data = &ctx,
		.start = genl_start,
		.dump = genl_dumpit,
		.done = genl_done,
		.extack = extack,
	};
	int err;

	ctx.family = family;
	ctx.nlh = nlh;
	ctx.extack = extack;
	ctx.ops = ops;
	ctx.hdrlen = hdrlen;

	genl_op_unlock(family);
	err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
	genl_op_lock(family);

	return err;
}

static int genl_family_rcv_msg_doit(const struct genl_family *family,
				    struct sk_buff *skb,
				    struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack,
				    const struct genl_split_ops *ops,
				    int hdrlen, struct net *net)
{
	struct nlattr **attrbuf;
	struct genl_info info;
	int err;

	attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
						  ops, hdrlen,
						  GENL_DONT_VALIDATE_STRICT);
	if (IS_ERR(attrbuf))
		return PTR_ERR(attrbuf);

	info.snd_seq = nlh->nlmsg_seq;
	info.snd_portid = NETLINK_CB(skb).portid;
	info.family = family;
	info.nlhdr = nlh;
	info.genlhdr = nlmsg_data(nlh);
	info.attrs = attrbuf;
	info.extack = extack;
	genl_info_net_set(&info, net);
	memset(&info.user_ptr, 0, sizeof(info.user_ptr));

	if (ops->pre_doit) {
		err = ops->pre_doit(ops, skb, &info);
		if (err)
			goto out;
	}

	err = ops->doit(skb, &info);

	if (ops->post_doit)
		ops->post_doit(ops, skb, &info);

out:
	genl_family_rcv_msg_attrs_free(attrbuf);

	return err;
}

static int genl_header_check(const struct genl_family *family,
			     struct nlmsghdr *nlh, struct genlmsghdr *hdr,
			     struct netlink_ext_ack *extack)
{
	u16 flags;

	/* Only for commands added after we started validating */
	if (hdr->cmd < family->resv_start_op)
		return 0;

	if (hdr->reserved) {
		NL_SET_ERR_MSG(extack, "genlmsghdr.reserved field is not 0");
		return -EINVAL;
	}

	/* Old netlink flags have pretty loose semantics, allow only the flags
	 * consumed by the core where we can enforce the meaning.
	 */
	flags = nlh->nlmsg_flags;
	if ((flags & NLM_F_DUMP) == NLM_F_DUMP)	/* DUMP is 2 bits */
		flags &= ~NLM_F_DUMP;
	if (flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO)) {
		NL_SET_ERR_MSG(extack,
			       "ambiguous or reserved bits set in nlmsg_flags");
		return -EINVAL;
	}

	return 0;
}

static int genl_family_rcv_msg(const struct genl_family *family,
			       struct sk_buff *skb,
			       struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct genlmsghdr *hdr = nlmsg_data(nlh);
	struct genl_split_ops op;
	int hdrlen;
	u8 flags;

	/* this family doesn't exist in this netns */
	if (!family->netnsok && !net_eq(net, &init_net))
		return -ENOENT;

	hdrlen = GENL_HDRLEN + family->hdrsize;
	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
		return -EINVAL;

	if (genl_header_check(family, nlh, hdr, extack))
		return -EINVAL;

	flags = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP ?
		GENL_CMD_CAP_DUMP : GENL_CMD_CAP_DO;
	if (genl_get_cmd(hdr->cmd, flags, family, &op))
		return -EOPNOTSUPP;

	if ((op.flags & GENL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if ((op.flags & GENL_UNS_ADMIN_PERM) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (flags & GENL_CMD_CAP_DUMP)
		return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
						  &op, hdrlen, net);
	else
		return genl_family_rcv_msg_doit(family, skb, nlh, extack,
						&op, hdrlen, net);
}

static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	const struct genl_family *family;
	int err;

	family = genl_family_find_byid(nlh->nlmsg_type);
	if (family == NULL)
		return -ENOENT;

	genl_op_lock(family);
	err = genl_family_rcv_msg(family, skb, nlh, extack);
	genl_op_unlock(family);

	return err;
}

static void genl_rcv(struct sk_buff *skb)
{
	down_read(&cb_lock);
	netlink_rcv_skb(skb, &genl_rcv_msg);
	up_read(&cb_lock);
}

/**************************************************************************
 * Controller
 **************************************************************************/

static struct genl_family genl_ctrl;

static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
			  u32 flags, struct sk_buff *skb, u8 cmd)
{
	struct genl_op_iter i;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
		goto nla_put_failure;

	if (genl_op_iter_init(family, &i)) {
		struct nlattr *nla_ops;

		nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
		if (nla_ops == NULL)
			goto nla_put_failure;

		while (genl_op_iter_next(&i)) {
			struct nlattr *nest;
			u32 op_flags;

			op_flags = i.flags;
			if (i.doit.policy || i.dumpit.policy)
				op_flags |= GENL_CMD_CAP_HASPOL;

			nest = nla_nest_start_noflag(skb, genl_op_iter_idx(&i));
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, i.cmd) ||
			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}

		nla_nest_end(skb, nla_ops);
	}

	if (family->n_mcgrps) {
		struct nlattr *nla_grps;
		int i;

		nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
		if (nla_grps == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_mcgrps; i++) {
			struct nlattr *nest;
			const struct genl_multicast_group *grp;

			grp = &family->mcgrps[i];

			nest = nla_nest_start_noflag(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
					family->mcgrp_offset + i) ||
			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
					   grp->name))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
		nla_nest_end(skb, nla_grps);
	}

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_fill_mcgrp_info(const struct genl_family *family,
				const struct genl_multicast_group *grp,
				int grp_id, u32 portid, u32 seq, u32 flags,
				struct sk_buff *skb, u8 cmd)
{
	void *hdr;
	struct nlattr *nla_grps;
	struct nlattr *nest;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -1;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
		goto nla_put_failure;

	nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
	if (nla_grps == NULL)
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, 1);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
			   grp->name))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	nla_nest_end(skb, nla_grps);

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
{
	int n = 0;
	struct genl_family *rt;
	struct net *net = sock_net(skb->sk);
	int fams_to_skip = cb->args[0];
	unsigned int id;

	idr_for_each_entry(&genl_fam_idr, rt, id) {
		if (!rt->netnsok && !net_eq(net, &init_net))
			continue;

		if (n++ < fams_to_skip)
			continue;

		if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI,
				   skb, CTRL_CMD_NEWFAMILY) < 0) {
			n--;
			break;
		}
	}

	cb->args[0] = n;
	return skb->len;
}

static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
					     u32 portid, int seq, u8 cmd)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
	if (err < 0) {
		nlmsg_free(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static struct sk_buff *
ctrl_build_mcgrp_msg(const struct genl_family *family,
		     const struct genl_multicast_group *grp,
		     int grp_id, u32 portid, int seq, u8 cmd)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
				   seq, 0, skb, cmd);
	if (err < 0) {
		nlmsg_free(skb);
		return ERR_PTR(err);
	}

	return skb;
}

static const struct nla_policy ctrl_policy_family[] = {
	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
				    .len = GENL_NAMSIZ - 1 },
};

static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	const struct genl_family *res = NULL;
	int err = -EINVAL;

	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
		res = genl_family_find_byid(id);
		err = -ENOENT;
	}

	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
		char *name;

		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
		res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
		if (res == NULL) {
			genl_unlock();
			up_read(&cb_lock);
			request_module("net-pf-%d-proto-%d-family-%s",
				       PF_NETLINK, NETLINK_GENERIC, name);
			down_read(&cb_lock);
			genl_lock();
			res = genl_family_find_byname(name);
		}
#endif
		err = -ENOENT;
	}

	if (res == NULL)
		return err;

	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
		/* family doesn't exist here */
		return -ENOENT;
	}

	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
				    CTRL_CMD_NEWFAMILY);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	return genlmsg_reply(msg, info);
}

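/*
 * Example: the request_module() call above looks modules up by the
 * "net-pf-<PF_NETLINK>-proto-<NETLINK_GENERIC>-family-<name>" alias,
 * which a modular family provides via the helper macro from
 * <net/genetlink.h>, e.g. (family name hypothetical):
 *
 *	MODULE_ALIAS_GENL_FAMILY("hypothetical");
 */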
static int genl_ctrl_event(int event, const struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id)
{
	struct sk_buff *msg;

	/* genl is still initialising */
	if (!init_net.genl_sock)
		return 0;

	switch (event) {
	case CTRL_CMD_NEWFAMILY:
	case CTRL_CMD_DELFAMILY:
		WARN_ON(grp);
		msg = ctrl_build_family_msg(family, 0, 0, event);
		break;
	case CTRL_CMD_NEWMCAST_GRP:
	case CTRL_CMD_DELMCAST_GRP:
		BUG_ON(!grp);
		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (!family->netnsok) {
		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
					0, GFP_KERNEL);
	} else {
		rcu_read_lock();
		genlmsg_multicast_allns(&genl_ctrl, msg, 0,
					0, GFP_ATOMIC);
		rcu_read_unlock();
	}

	return 0;
}

struct ctrl_dump_policy_ctx {
	struct netlink_policy_dump_state *state;
	const struct genl_family *rt;
	struct genl_op_iter *op_iter;
	u32 op;
	u16 fam_id;
	u8 dump_map:1,
	   single_op:1;
};

static const struct nla_policy ctrl_policy_policy[] = {
	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
				    .len = GENL_NAMSIZ - 1 },
	[CTRL_ATTR_OP]		= { .type = NLA_U32 },
};

static int ctrl_dumppolicy_start(struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
	struct nlattr **tb = info->info.attrs;
	const struct genl_family *rt;
	struct genl_op_iter i;
	int err;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
		return -EINVAL;

	if (tb[CTRL_ATTR_FAMILY_ID]) {
		ctx->fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
	} else {
		rt = genl_family_find_byname(
			nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
		if (!rt)
			return -ENOENT;
		ctx->fam_id = rt->id;
	}

	rt = genl_family_find_byid(ctx->fam_id);
	if (!rt)
		return -ENOENT;

	ctx->rt = rt;

	if (tb[CTRL_ATTR_OP]) {
		struct genl_split_ops doit, dump;

		ctx->single_op = true;
		ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);

		err = genl_get_cmd_both(ctx->op, rt, &doit, &dump);
		if (err) {
			NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
			return err;
		}

		if (doit.policy) {
			err = netlink_policy_dump_add_policy(&ctx->state,
							     doit.policy,
							     doit.maxattr);
			if (err)
				goto err_free_state;
		}
		if (dump.policy) {
			err = netlink_policy_dump_add_policy(&ctx->state,
							     dump.policy,
							     dump.maxattr);
			if (err)
				goto err_free_state;
		}

		if (!ctx->state)
			return -ENODATA;

		ctx->dump_map = 1;
		return 0;
	}

	ctx->op_iter = kmalloc(sizeof(*ctx->op_iter), GFP_KERNEL);
	if (!ctx->op_iter)
		return -ENOMEM;

	genl_op_iter_init(rt, ctx->op_iter);
	ctx->dump_map = genl_op_iter_next(ctx->op_iter);

	for (genl_op_iter_init(rt, &i); genl_op_iter_next(&i); ) {
		if (i.doit.policy) {
			err = netlink_policy_dump_add_policy(&ctx->state,
							     i.doit.policy,
							     i.doit.maxattr);
			if (err)
				goto err_free_state;
		}
		if (i.dumpit.policy) {
			err = netlink_policy_dump_add_policy(&ctx->state,
							     i.dumpit.policy,
							     i.dumpit.maxattr);
			if (err)
				goto err_free_state;
		}
	}

	if (!ctx->state) {
		err = -ENODATA;
		goto err_free_op_iter;
	}
	return 0;

err_free_state:
	netlink_policy_dump_free(ctx->state);
err_free_op_iter:
	kfree(ctx->op_iter);
	return err;
}

static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			  cb->nlh->nlmsg_seq, &genl_ctrl,
			  NLM_F_MULTI, CTRL_CMD_GETPOLICY);
	if (!hdr)
		return NULL;

	if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, ctx->fam_id))
		return NULL;

	return hdr;
}

static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct genl_split_ops *doit,
				  struct genl_split_ops *dumpit)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
	struct nlattr *nest_pol, *nest_op;
	void *hdr;
	int idx;

	/* skip if we have nothing to show */
	if (!doit->policy && !dumpit->policy)
		return 0;

	hdr = ctrl_dumppolicy_prep(skb, cb);
	if (!hdr)
		return -ENOBUFS;

	nest_pol = nla_nest_start(skb, CTRL_ATTR_OP_POLICY);
	if (!nest_pol)
		goto err;

	nest_op = nla_nest_start(skb, doit->cmd);
	if (!nest_op)
		goto err;

	if (doit->policy) {
		idx = netlink_policy_dump_get_policy_idx(ctx->state,
							 doit->policy,
							 doit->maxattr);

		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
			goto err;
	}
	if (dumpit->policy) {
		idx = netlink_policy_dump_get_policy_idx(ctx->state,
							 dumpit->policy,
							 dumpit->maxattr);

		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
			goto err;
	}

	nla_nest_end(skb, nest_op);
	nla_nest_end(skb, nest_pol);
	genlmsg_end(skb, hdr);

	return 0;
err:
	genlmsg_cancel(skb, hdr);
	return -ENOBUFS;
}

static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
	void *hdr;

	if (ctx->dump_map) {
		if (ctx->single_op) {
			struct genl_split_ops doit, dumpit;

			if (WARN_ON(genl_get_cmd_both(ctx->op, ctx->rt,
						      &doit, &dumpit)))
				return -ENOENT;

			if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
				return skb->len;

			/* done with the per-op policy index list */
			ctx->dump_map = 0;
		}

		while (ctx->dump_map) {
			if (ctrl_dumppolicy_put_op(skb, cb,
						   &ctx->op_iter->doit,
						   &ctx->op_iter->dumpit))
				return skb->len;

			ctx->dump_map = genl_op_iter_next(ctx->op_iter);
		}
	}

	while (netlink_policy_dump_loop(ctx->state)) {
		struct nlattr *nest;

		hdr = ctrl_dumppolicy_prep(skb, cb);
		if (!hdr)
			goto nla_put_failure;

		nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
		if (!nest)
			goto nla_put_failure;

		if (netlink_policy_dump_write(skb, ctx->state))
			goto nla_put_failure;

		nla_nest_end(skb, nest);

		genlmsg_end(skb, hdr);
	}

	return skb->len;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return skb->len;
}

static int ctrl_dumppolicy_done(struct netlink_callback *cb)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;

	kfree(ctx->op_iter);
	netlink_policy_dump_free(ctx->state);
	return 0;
}

static const struct genl_split_ops genl_ctrl_ops[] = {
	{
		.cmd		= CTRL_CMD_GETFAMILY,
		.validate	= GENL_DONT_VALIDATE_STRICT,
		.policy		= ctrl_policy_family,
		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
		.doit		= ctrl_getfamily,
		.flags		= GENL_CMD_CAP_DO,
	},
	{
		.cmd		= CTRL_CMD_GETFAMILY,
		.validate	= GENL_DONT_VALIDATE_DUMP,
		.policy		= ctrl_policy_family,
		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
		.dumpit		= ctrl_dumpfamily,
		.flags		= GENL_CMD_CAP_DUMP,
	},
	{
		.cmd		= CTRL_CMD_GETPOLICY,
		.policy		= ctrl_policy_policy,
		.maxattr	= ARRAY_SIZE(ctrl_policy_policy) - 1,
		.start		= ctrl_dumppolicy_start,
		.dumpit		= ctrl_dumppolicy,
		.done		= ctrl_dumppolicy_done,
		.flags		= GENL_CMD_CAP_DUMP,
	},
};

static const struct genl_multicast_group genl_ctrl_groups[] = {
	{ .name = "notify", },
};

static struct genl_family genl_ctrl __ro_after_init = {
	.module		= THIS_MODULE,
	.split_ops	= genl_ctrl_ops,
	.n_split_ops	= ARRAY_SIZE(genl_ctrl_ops),
	.resv_start_op	= CTRL_CMD_GETPOLICY + 1,
	.mcgrps		= genl_ctrl_groups,
	.n_mcgrps	= ARRAY_SIZE(genl_ctrl_groups),
	.id		= GENL_ID_CTRL,
	.name		= "nlctrl",
	.version	= 0x2,
	.netnsok	= true,
};

static int genl_bind(struct net *net, int group)
{
	const struct genl_family *family;
	unsigned int id;
	int ret = 0;

	down_read(&cb_lock);

	idr_for_each_entry(&genl_fam_idr, family, id) {
		const struct genl_multicast_group *grp;
		int i;

		if (family->n_mcgrps == 0)
			continue;

		i = group - family->mcgrp_offset;
		if (i < 0 || i >= family->n_mcgrps)
			continue;

		grp = &family->mcgrps[i];
		if ((grp->flags & GENL_MCAST_CAP_NET_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		if ((grp->flags & GENL_MCAST_CAP_SYS_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_SYS_ADMIN))
			ret = -EPERM;

		break;
	}

	up_read(&cb_lock);
	return ret;
}

static int __net_init genl_pernet_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input		= genl_rcv,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= genl_bind,
		.release	= genl_release,
	};

	/* we'll bump the group number right afterwards */
	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);

	if (!net->genl_sock && net_eq(net, &init_net))
		panic("GENL: Cannot initialize generic netlink\n");

	if (!net->genl_sock)
		return -ENOMEM;

	return 0;
}

static void __net_exit genl_pernet_exit(struct net *net)
{
	netlink_kernel_release(net->genl_sock);
	net->genl_sock = NULL;
}

static struct pernet_operations genl_pernet_ops = {
	.init = genl_pernet_init,
	.exit = genl_pernet_exit,
};

static int __init genl_init(void)
{
	int err;

	err = genl_register_family(&genl_ctrl);
	if (err < 0)
		goto problem;

	err = register_pernet_subsys(&genl_pernet_ops);
	if (err)
		goto problem;

	return 0;

problem:
	panic("GENL: Cannot register controller: %d\n", err);
}

core_initcall(genl_init);

static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
			 gfp_t flags)
{
	struct sk_buff *tmp;
	struct net *net, *prev = NULL;
	bool delivered = false;
	int err;

	for_each_net_rcu(net) {
		if (prev) {
			tmp = skb_clone(skb, flags);
			if (!tmp) {
				err = -ENOMEM;
				goto error;
			}
			err = nlmsg_multicast(prev->genl_sock, tmp,
					      portid, group, flags);
			if (!err)
				delivered = true;
			else if (err != -ESRCH)
				goto error;
		}

		prev = net;
	}

	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
	if (!err)
		delivered = true;
	else if (err != -ESRCH)
		return err;
	return delivered ? 0 : -ESRCH;
error:
	kfree_skb(skb);
	return err;
}

int genlmsg_multicast_allns(const struct genl_family *family,
			    struct sk_buff *skb, u32 portid,
			    unsigned int group, gfp_t flags)
{
	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return -EINVAL;

	group = family->mcgrp_offset + group;
	return genlmsg_mcast(skb, portid, group, flags);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);

void genl_notify(const struct genl_family *family, struct sk_buff *skb,
		 struct genl_info *info, u32 group, gfp_t flags)
{
	struct net *net = genl_info_net(info);
	struct sock *sk = net->genl_sock;

	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return;

	group = family->mcgrp_offset + group;
	nlmsg_notify(sk, skb, info->snd_portid, group,
		     nlmsg_report(info->nlhdr), flags);
}
EXPORT_SYMBOL(genl_notify);
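/*
 * Example (hypothetical identifiers): a family typically sends events by
 * building a message with genlmsg_put()/genlmsg_end() and multicasting
 * it to one of its groups; the group number passed to these helpers is
 * the index into the family's mcgrps[] array, not the global group ID:
 *
 *	genl_notify(&hypothetical_family, msg, info, 0, GFP_KERNEL);
 *
 * or, outside of a request context:
 *
 *	genlmsg_multicast(&hypothetical_family, msg, 0, 0, GFP_KERNEL);
 */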