/*
 * NETLINK      Generic Netlink Family
 *
 *              Authors:        Jamal Hadi Salim
 *                              Thomas Graf <tgraf@suug.ch>
 *                              Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/rwsem.h>
#include <net/sock.h>
#include <net/genetlink.h>

static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);

void genl_lock(void)
{
        mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);

void genl_unlock(void)
{
        mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);

#ifdef CONFIG_LOCKDEP
int lockdep_genl_is_held(void)
{
        return lockdep_is_held(&genl_mutex);
}
EXPORT_SYMBOL(lockdep_genl_is_held);
#endif

static void genl_lock_all(void)
{
        down_write(&cb_lock);
        genl_lock();
}

static void genl_unlock_all(void)
{
        genl_unlock();
        up_write(&cb_lock);
}

#define GENL_FAM_TAB_SIZE       16
#define GENL_FAM_TAB_MASK       (GENL_FAM_TAB_SIZE - 1)

static struct list_head family_ht[GENL_FAM_TAB_SIZE];
/*
 * Bitmap of multicast groups that are currently in use.
 *
 * To avoid an allocation at boot of just one unsigned long,
 * declare it global instead.
 * Bit 0 is marked as already used since group 0 is invalid.
 * Bit 1 is marked as already used since the drop-monitor code
 * abuses the API and thinks it can statically use group 1.
 * That group will typically conflict with other groups that
 * any proper users use.
 * Bit 16 is marked as used since it's used for generic netlink
 * and the code no longer marks pre-reserved IDs as used.
 * Bit 17 is marked as already used since the VFS quota code
 * also abused this API and relied on family == group ID; we
 * cater to that by giving it a static family and group ID.
 * Bit 18 is marked as already used since the PMCRAID driver
 * did the same thing as the VFS quota code (maybe copied?)
 */
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
                                      BIT(GENL_ID_VFS_DQUOT) |
                                      BIT(GENL_ID_PMCRAID);
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;

static int genl_ctrl_event(int event, struct genl_family *family,
                           const struct genl_multicast_group *grp,
                           int grp_id);

static inline unsigned int genl_family_hash(unsigned int id)
{
        return id & GENL_FAM_TAB_MASK;
}

static inline struct list_head *genl_family_chain(unsigned int id)
{
        return &family_ht[genl_family_hash(id)];
}

static struct genl_family *genl_family_find_byid(unsigned int id)
{
        struct genl_family *f;

        list_for_each_entry(f, genl_family_chain(id), family_list)
                if (f->id == id)
                        return f;

        return NULL;
}

static struct genl_family *genl_family_find_byname(char *name)
{
        struct genl_family *f;
        int i;

        for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
                list_for_each_entry(f, genl_family_chain(i), family_list)
                        if (strcmp(f->name, name) == 0)
                                return f;

        return NULL;
}

static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
{
        int i;

        for (i = 0; i < family->n_ops; i++)
                if (family->ops[i].cmd == cmd)
                        return &family->ops[i];

        return NULL;
}

/* Of course we are going to have problems once we hit
 * 2^16 alive types, but that can only happen by year 2K
 */
static u16 genl_generate_id(void)
{
        static u16 id_gen_idx = GENL_MIN_ID;
        int i;

        for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
                if (id_gen_idx != GENL_ID_VFS_DQUOT &&
                    id_gen_idx != GENL_ID_PMCRAID &&
                    !genl_family_find_byid(id_gen_idx))
                        return id_gen_idx;
                if (++id_gen_idx > GENL_MAX_ID)
                        id_gen_idx = GENL_MIN_ID;
        }

        return 0;
}

static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
        unsigned long *new_groups;
        int start = 0;
        int i;
        int id;
        bool fits;

        do {
                if (start == 0)
                        id = find_first_zero_bit(mc_groups,
                                                 mc_groups_longs *
                                                 BITS_PER_LONG);
                else
                        id = find_next_zero_bit(mc_groups,
                                                mc_groups_longs * BITS_PER_LONG,
                                                start);

                fits = true;
                for (i = id;
                     i < min_t(int, id + n_groups,
                               mc_groups_longs * BITS_PER_LONG);
                     i++) {
                        if (test_bit(i, mc_groups)) {
                                start = i;
                                fits = false;
                                break;
                        }
                }

                if (id >= mc_groups_longs * BITS_PER_LONG) {
                        unsigned long new_longs = mc_groups_longs +
                                                  BITS_TO_LONGS(n_groups);
                        size_t nlen = new_longs * sizeof(unsigned long);

                        if (mc_groups == &mc_group_start) {
                                new_groups = kzalloc(nlen, GFP_KERNEL);
                                if (!new_groups)
                                        return -ENOMEM;
                                mc_groups = new_groups;
                                *mc_groups = mc_group_start;
                        } else {
                                new_groups = krealloc(mc_groups, nlen,
                                                      GFP_KERNEL);
                                if (!new_groups)
                                        return -ENOMEM;
                                mc_groups = new_groups;
                                for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
                                        mc_groups[mc_groups_longs + i] = 0;
                        }
                        mc_groups_longs = new_longs;
                }
        } while (!fits);

        for (i = id; i < id + n_groups; i++)
                set_bit(i, mc_groups);
        *first_id = id;
        return 0;
}
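
/*
 * For illustration: with the boot-time bitmap above, bits 0, 1,
 * GENL_ID_CTRL (16), GENL_ID_VFS_DQUOT (17) and GENL_ID_PMCRAID (18)
 * start out set.  A hypothetical first request for three groups finds
 * the zero run starting at bit 2 and returns *first_id == 2 (group ids
 * 2..4); a request that cannot be placed below the reserved bits is
 * retried from the first conflicting bit, and once the search runs off
 * the end of the bitmap the bitmap is grown by BITS_TO_LONGS(n_groups)
 * longs (the kzalloc()/krealloc() path above) and the search repeated.
 */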

static struct genl_family genl_ctrl;

static int genl_validate_assign_mc_groups(struct genl_family *family)
{
        int first_id;
        int n_groups = family->n_mcgrps;
        int err = 0, i;
        bool groups_allocated = false;

        if (!n_groups)
                return 0;

        for (i = 0; i < n_groups; i++) {
                const struct genl_multicast_group *grp = &family->mcgrps[i];

                if (WARN_ON(grp->name[0] == '\0'))
                        return -EINVAL;
                if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
                        return -EINVAL;
        }

        /* special-case our own group and hacks */
        if (family == &genl_ctrl) {
                first_id = GENL_ID_CTRL;
                BUG_ON(n_groups != 1);
        } else if (strcmp(family->name, "NET_DM") == 0) {
                first_id = 1;
                BUG_ON(n_groups != 1);
        } else if (family->id == GENL_ID_VFS_DQUOT) {
                first_id = GENL_ID_VFS_DQUOT;
                BUG_ON(n_groups != 1);
        } else if (family->id == GENL_ID_PMCRAID) {
                first_id = GENL_ID_PMCRAID;
                BUG_ON(n_groups != 1);
        } else {
                groups_allocated = true;
                err = genl_allocate_reserve_groups(n_groups, &first_id);
                if (err)
                        return err;
        }

        family->mcgrp_offset = first_id;

        /* if still initializing, can't and don't need to realloc bitmaps */
        if (!init_net.genl_sock)
                return 0;

        if (family->netnsok) {
                struct net *net;

                netlink_table_grab();
                rcu_read_lock();
                for_each_net_rcu(net) {
                        err = __netlink_change_ngroups(net->genl_sock,
                                        mc_groups_longs * BITS_PER_LONG);
                        if (err) {
                                /*
                                 * No need to roll back, can only fail if
                                 * memory allocation fails and then the
                                 * number of _possible_ groups has been
                                 * increased on some sockets which is ok.
                                 */
                                break;
                        }
                }
                rcu_read_unlock();
                netlink_table_ungrab();
        } else {
                err = netlink_change_ngroups(init_net.genl_sock,
                                             mc_groups_longs * BITS_PER_LONG);
        }

        if (groups_allocated && err) {
                for (i = 0; i < family->n_mcgrps; i++)
                        clear_bit(family->mcgrp_offset + i, mc_groups);
        }

        return err;
}

static void genl_unregister_mc_groups(struct genl_family *family)
{
        struct net *net;
        int i;

        netlink_table_grab();
        rcu_read_lock();
        for_each_net_rcu(net) {
                for (i = 0; i < family->n_mcgrps; i++)
                        __netlink_clear_multicast_users(
                                net->genl_sock, family->mcgrp_offset + i);
        }
        rcu_read_unlock();
        netlink_table_ungrab();

        for (i = 0; i < family->n_mcgrps; i++) {
                int grp_id = family->mcgrp_offset + i;

                if (grp_id != 1)
                        clear_bit(grp_id, mc_groups);
                genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
                                &family->mcgrps[i], grp_id);
        }
}

static int genl_validate_ops(struct genl_family *family)
{
        const struct genl_ops *ops = family->ops;
        unsigned int n_ops = family->n_ops;
        int i, j;

        if (WARN_ON(n_ops && !ops))
                return -EINVAL;

        if (!n_ops)
                return 0;

        for (i = 0; i < n_ops; i++) {
                if (ops[i].dumpit == NULL && ops[i].doit == NULL)
                        return -EINVAL;
                for (j = i + 1; j < n_ops; j++)
                        if (ops[i].cmd == ops[j].cmd)
                                return -EINVAL;
        }

        /* family is not registered yet, so no locking needed */
        family->ops = ops;
        family->n_ops = n_ops;

        return 0;
}

/**
 * __genl_register_family - register a generic netlink family
 * @family: generic netlink family
 *
 * Registers the specified family after validating it first. Only one
 * family may be registered with the same family name or identifier.
 * The family id may equal GENL_ID_GENERATE, causing a unique id to
 * be automatically generated and assigned.
 *
 * The family's ops array must already be assigned; you can use the
 * genl_register_family_with_ops() helper function.
 *
 * Returns 0 on success or a negative error code.
 */
int __genl_register_family(struct genl_family *family)
{
        int err = -EINVAL, i;

        if (family->id && family->id < GENL_MIN_ID)
                goto errout;

        if (family->id > GENL_MAX_ID)
                goto errout;

        err = genl_validate_ops(family);
        if (err)
                return err;

        genl_lock_all();

        if (genl_family_find_byname(family->name)) {
                err = -EEXIST;
                goto errout_locked;
        }

        if (family->id == GENL_ID_GENERATE) {
                u16 newid = genl_generate_id();

                if (!newid) {
                        err = -ENOMEM;
                        goto errout_locked;
                }

                family->id = newid;
        } else if (genl_family_find_byid(family->id)) {
                err = -EEXIST;
                goto errout_locked;
        }

        if (family->maxattr && !family->parallel_ops) {
                family->attrbuf = kmalloc((family->maxattr+1) *
                                          sizeof(struct nlattr *), GFP_KERNEL);
                if (family->attrbuf == NULL) {
                        err = -ENOMEM;
                        goto errout_locked;
                }
        } else
                family->attrbuf = NULL;

        err = genl_validate_assign_mc_groups(family);
        if (err)
                goto errout_locked;

        list_add_tail(&family->family_list, genl_family_chain(family->id));
        genl_unlock_all();

        /* send all events */
        genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
        for (i = 0; i < family->n_mcgrps; i++)
                genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
                                &family->mcgrps[i], family->mcgrp_offset + i);

        return 0;

errout_locked:
        genl_unlock_all();
errout:
        return err;
}
EXPORT_SYMBOL(__genl_register_family);

/**
 * genl_unregister_family - unregister generic netlink family
 * @family: generic netlink family
 *
 * Unregisters the specified family.
 *
 * Returns 0 on success or a negative error code.
 */
int genl_unregister_family(struct genl_family *family)
{
        struct genl_family *rc;

        genl_lock_all();

        genl_unregister_mc_groups(family);

        list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
                if (family->id != rc->id || strcmp(rc->name, family->name))
                        continue;

                list_del(&rc->family_list);
                family->n_ops = 0;
                genl_unlock_all();

                kfree(family->attrbuf);
                genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
                return 0;
        }

        genl_unlock_all();

        return -ENOENT;
}
EXPORT_SYMBOL(genl_unregister_family);
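
/*
 * Usage sketch: a typical family registers its ops and multicast
 * groups through the genl_register_family_with_ops_groups() helper,
 * which fills in ->ops/->n_ops and ->mcgrps/->n_mcgrps before calling
 * __genl_register_family().  The "foo" family, its FOO_* constants and
 * the foo_doit()/foo_policy symbols below are hypothetical and only
 * illustrate the shape of the API:
 *
 *      static const struct genl_multicast_group foo_mcgrps[] = {
 *              { .name = "foo_events", },
 *      };
 *
 *      static const struct genl_ops foo_ops[] = {
 *              {
 *                      .cmd    = FOO_CMD_GET,
 *                      .doit   = foo_doit,
 *                      .policy = foo_policy,
 *              },
 *      };
 *
 *      static struct genl_family foo_family = {
 *              .id      = GENL_ID_GENERATE,
 *              .name    = "foo",
 *              .version = 1,
 *              .maxattr = FOO_ATTR_MAX,
 *      };
 *
 *      err = genl_register_family_with_ops_groups(&foo_family, foo_ops,
 *                                                 foo_mcgrps);
 *      ...
 *      genl_unregister_family(&foo_family);
 */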

/**
 * genlmsg_new_unicast - Allocate generic netlink message for unicast
 * @payload: size of the message payload
 * @info: information on destination
 * @flags: the type of memory to allocate
 *
 * Allocates a new sk_buff large enough to cover the specified payload
 * plus required Netlink headers. Will check receiving socket for
 * memory mapped i/o capability and use it if enabled. Will fall back
 * to non-mapped skb if message size exceeds the frame size of the ring.
 */
struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
                                    gfp_t flags)
{
        size_t len = nlmsg_total_size(genlmsg_total_size(payload));

        return netlink_alloc_skb(info->dst_sk, len, info->snd_portid, flags);
}
EXPORT_SYMBOL_GPL(genlmsg_new_unicast);

/**
 * genlmsg_put - Add generic netlink header to netlink message
 * @skb: socket buffer holding the message
 * @portid: netlink portid the message is addressed to
 * @seq: sequence number (usually the one of the sender)
 * @family: generic netlink family
 * @flags: netlink message flags
 * @cmd: generic netlink command
 *
 * Returns pointer to user specific header
 */
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
                  struct genl_family *family, int flags, u8 cmd)
{
        struct nlmsghdr *nlh;
        struct genlmsghdr *hdr;

        nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
                        family->hdrsize, flags);
        if (nlh == NULL)
                return NULL;

        hdr = nlmsg_data(nlh);
        hdr->cmd = cmd;
        hdr->version = family->version;
        hdr->reserved = 0;

        return (char *) hdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);
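
/*
 * Usage sketch: a unicast reply is normally built by allocating an
 * skb, starting it with genlmsg_put(), adding attributes and closing
 * it with genlmsg_end() before handing it to genlmsg_reply().  The
 * handler, command and attribute names continue the hypothetical
 * "foo" family from the sketch above:
 *
 *      static int foo_doit(struct sk_buff *skb, struct genl_info *info)
 *      {
 *              struct sk_buff *msg;
 *              void *hdr;
 *
 *              msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *              if (!msg)
 *                      return -ENOMEM;
 *
 *              hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
 *                                &foo_family, 0, FOO_CMD_GET);
 *              if (!hdr)
 *                      goto nla_put_failure;
 *
 *              if (nla_put_u32(msg, FOO_ATTR_VALUE, 42))
 *                      goto nla_put_failure;
 *
 *              genlmsg_end(msg, hdr);
 *              return genlmsg_reply(msg, info);
 *
 *      nla_put_failure:
 *              nlmsg_free(msg);
 *              return -EMSGSIZE;
 *      }
 */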

static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        /* our ops are always const - netlink API doesn't propagate that */
        const struct genl_ops *ops = cb->data;
        int rc;

        genl_lock();
        rc = ops->dumpit(skb, cb);
        genl_unlock();
        return rc;
}

static int genl_lock_done(struct netlink_callback *cb)
{
        /* our ops are always const - netlink API doesn't propagate that */
        const struct genl_ops *ops = cb->data;
        int rc = 0;

        if (ops->done) {
                genl_lock();
                rc = ops->done(cb);
                genl_unlock();
        }
        return rc;
}

static int genl_family_rcv_msg(struct genl_family *family,
                               struct sk_buff *skb,
                               struct nlmsghdr *nlh)
{
        const struct genl_ops *ops;
        struct net *net = sock_net(skb->sk);
        struct genl_info info;
        struct genlmsghdr *hdr = nlmsg_data(nlh);
        struct nlattr **attrbuf;
        int hdrlen, err;

        /* this family doesn't exist in this netns */
        if (!family->netnsok && !net_eq(net, &init_net))
                return -ENOENT;

        hdrlen = GENL_HDRLEN + family->hdrsize;
        if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
                return -EINVAL;

        ops = genl_get_cmd(hdr->cmd, family);
        if (ops == NULL)
                return -EOPNOTSUPP;

        if ((ops->flags & GENL_ADMIN_PERM) &&
            !capable(CAP_NET_ADMIN))
                return -EPERM;

        if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
                int rc;

                if (ops->dumpit == NULL)
                        return -EOPNOTSUPP;

                if (!family->parallel_ops) {
                        struct netlink_dump_control c = {
                                .module = family->module,
                                /* we have const, but the netlink API doesn't */
                                .data = (void *)ops,
                                .dump = genl_lock_dumpit,
                                .done = genl_lock_done,
                        };

                        genl_unlock();
                        rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
                        genl_lock();

                } else {
                        struct netlink_dump_control c = {
                                .module = family->module,
                                .dump = ops->dumpit,
                                .done = ops->done,
                        };

                        rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
                }

                return rc;
        }

        if (ops->doit == NULL)
                return -EOPNOTSUPP;

        if (family->maxattr && family->parallel_ops) {
                attrbuf = kmalloc((family->maxattr+1) *
                                  sizeof(struct nlattr *), GFP_KERNEL);
                if (attrbuf == NULL)
                        return -ENOMEM;
        } else
                attrbuf = family->attrbuf;

        if (attrbuf) {
                err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
                                  ops->policy);
                if (err < 0)
                        goto out;
        }

        info.snd_seq = nlh->nlmsg_seq;
        info.snd_portid = NETLINK_CB(skb).portid;
        info.nlhdr = nlh;
        info.genlhdr = nlmsg_data(nlh);
        info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
        info.attrs = attrbuf;
        info.dst_sk = skb->sk;
        genl_info_net_set(&info, net);
        memset(&info.user_ptr, 0, sizeof(info.user_ptr));

        if (family->pre_doit) {
                err = family->pre_doit(ops, skb, &info);
                if (err)
                        goto out;
        }

        err = ops->doit(skb, &info);

        if (family->post_doit)
                family->post_doit(ops, skb, &info);

out:
        if (family->parallel_ops)
                kfree(attrbuf);

        return err;
}

static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct genl_family *family;
        int err;

        family = genl_family_find_byid(nlh->nlmsg_type);
        if (family == NULL)
                return -ENOENT;

        if (!family->parallel_ops)
                genl_lock();

        err = genl_family_rcv_msg(family, skb, nlh);

        if (!family->parallel_ops)
                genl_unlock();

        return err;
}

static void genl_rcv(struct sk_buff *skb)
{
        down_read(&cb_lock);
        netlink_rcv_skb(skb, &genl_rcv_msg);
        up_read(&cb_lock);
}
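
/*
 * Usage sketch: families that need common setup and teardown around
 * every doit() can hook ->pre_doit() and ->post_doit(), which
 * genl_family_rcv_msg() above calls around the handler.  The zeroed
 * info->user_ptr[] slots are the conventional way to hand an object
 * looked up in pre_doit to the handler; foo_dev_lookup() and
 * foo_dev_put() are hypothetical helpers:
 *
 *      static int foo_pre_doit(const struct genl_ops *ops,
 *                              struct sk_buff *skb, struct genl_info *info)
 *      {
 *              struct foo_dev *dev = foo_dev_lookup(info->attrs);
 *
 *              if (!dev)
 *                      return -ENODEV;
 *              info->user_ptr[0] = dev;
 *              return 0;
 *      }
 *
 *      static void foo_post_doit(const struct genl_ops *ops,
 *                                struct sk_buff *skb, struct genl_info *info)
 *      {
 *              foo_dev_put(info->user_ptr[0]);
 *      }
 */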

/**************************************************************************
 * Controller
 **************************************************************************/

static struct genl_family genl_ctrl = {
        .id = GENL_ID_CTRL,
        .name = "nlctrl",
        .version = 0x2,
        .maxattr = CTRL_ATTR_MAX,
        .netnsok = true,
};

static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
                          u32 flags, struct sk_buff *skb, u8 cmd)
{
        void *hdr;

        hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
        if (hdr == NULL)
                return -1;

        if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
            nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
            nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
            nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
            nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
                goto nla_put_failure;

        if (family->n_ops) {
                struct nlattr *nla_ops;
                int i;

                nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
                if (nla_ops == NULL)
                        goto nla_put_failure;

                for (i = 0; i < family->n_ops; i++) {
                        struct nlattr *nest;
                        const struct genl_ops *ops = &family->ops[i];
                        u32 op_flags = ops->flags;

                        if (ops->dumpit)
                                op_flags |= GENL_CMD_CAP_DUMP;
                        if (ops->doit)
                                op_flags |= GENL_CMD_CAP_DO;
                        if (ops->policy)
                                op_flags |= GENL_CMD_CAP_HASPOL;

                        nest = nla_nest_start(skb, i + 1);
                        if (nest == NULL)
                                goto nla_put_failure;

                        if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
                            nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
                                goto nla_put_failure;

                        nla_nest_end(skb, nest);
                }

                nla_nest_end(skb, nla_ops);
        }

        if (family->n_mcgrps) {
                struct nlattr *nla_grps;
                int i;

                nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
                if (nla_grps == NULL)
                        goto nla_put_failure;

                for (i = 0; i < family->n_mcgrps; i++) {
                        struct nlattr *nest;
                        const struct genl_multicast_group *grp;

                        grp = &family->mcgrps[i];

                        nest = nla_nest_start(skb, i + 1);
                        if (nest == NULL)
                                goto nla_put_failure;

                        if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
                                        family->mcgrp_offset + i) ||
                            nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
                                           grp->name))
                                goto nla_put_failure;

                        nla_nest_end(skb, nest);
                }
                nla_nest_end(skb, nla_grps);
        }

        return genlmsg_end(skb, hdr);

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int ctrl_fill_mcgrp_info(struct genl_family *family,
                                const struct genl_multicast_group *grp,
                                int grp_id, u32 portid, u32 seq, u32 flags,
                                struct sk_buff *skb, u8 cmd)
{
        void *hdr;
        struct nlattr *nla_grps;
        struct nlattr *nest;

        hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
        if (hdr == NULL)
                return -1;

        if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
            nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
                goto nla_put_failure;

        nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
        if (nla_grps == NULL)
                goto nla_put_failure;

        nest = nla_nest_start(skb, 1);
        if (nest == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
            nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
                           grp->name))
                goto nla_put_failure;

        nla_nest_end(skb, nest);
        nla_nest_end(skb, nla_grps);

        return genlmsg_end(skb, hdr);

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
{
        int i, n = 0;
        struct genl_family *rt;
        struct net *net = sock_net(skb->sk);
        int chains_to_skip = cb->args[0];
        int fams_to_skip = cb->args[1];

        for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
                n = 0;
                list_for_each_entry(rt, genl_family_chain(i), family_list) {
                        if (!rt->netnsok && !net_eq(net, &init_net))
                                continue;
                        if (++n < fams_to_skip)
                                continue;
                        if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           skb, CTRL_CMD_NEWFAMILY) < 0)
                                goto errout;
                }

                fams_to_skip = 0;
        }

errout:
        cb->args[0] = i;
        cb->args[1] = n;

        return skb->len;
}

static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
                                             u32 portid, int seq, u8 cmd)
{
        struct sk_buff *skb;
        int err;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (skb == NULL)
                return ERR_PTR(-ENOBUFS);

        err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
        if (err < 0) {
                nlmsg_free(skb);
                return ERR_PTR(err);
        }

        return skb;
}

static struct sk_buff *
ctrl_build_mcgrp_msg(struct genl_family *family,
                     const struct genl_multicast_group *grp,
                     int grp_id, u32 portid, int seq, u8 cmd)
{
        struct sk_buff *skb;
        int err;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (skb == NULL)
                return ERR_PTR(-ENOBUFS);

        err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
                                   seq, 0, skb, cmd);
        if (err < 0) {
                nlmsg_free(skb);
                return ERR_PTR(err);
        }

        return skb;
}

static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
        [CTRL_ATTR_FAMILY_ID]   = { .type = NLA_U16 },
        [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
                                    .len = GENL_NAMSIZ - 1 },
};
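
/*
 * Usage sketch: like ctrl_policy above, a family describes its
 * attributes with an nla_policy array indexed by attribute type and
 * points each op's ->policy at it; genl_family_rcv_msg() then
 * validates and collects the attributes into info->attrs via
 * nlmsg_parse() before calling doit().  The attribute constants below
 * belong to the hypothetical "foo" family from the earlier sketches:
 *
 *      static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
 *              [FOO_ATTR_NAME]  = { .type = NLA_NUL_STRING,
 *                                   .len = GENL_NAMSIZ - 1 },
 *              [FOO_ATTR_VALUE] = { .type = NLA_U32 },
 *      };
 */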

static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *msg;
        struct genl_family *res = NULL;
        int err = -EINVAL;

        if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
                u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
                res = genl_family_find_byid(id);
                err = -ENOENT;
        }

        if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
                char *name;

                name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
                res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
                if (res == NULL) {
                        genl_unlock();
                        up_read(&cb_lock);
                        request_module("net-pf-%d-proto-%d-family-%s",
                                       PF_NETLINK, NETLINK_GENERIC, name);
                        down_read(&cb_lock);
                        genl_lock();
                        res = genl_family_find_byname(name);
                }
#endif
                err = -ENOENT;
        }

        if (res == NULL)
                return err;

        if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
                /* family doesn't exist here */
                return -ENOENT;
        }

        msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
                                    CTRL_CMD_NEWFAMILY);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        return genlmsg_reply(msg, info);
}
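
/*
 * For illustration: the request_module() call above lets a family
 * implemented as a module be demand-loaded the first time userspace
 * asks the controller to resolve its name.  A hypothetical "foo"
 * module would advertise a matching alias, e.g. with
 *
 *      MODULE_ALIAS_GENL_FAMILY("foo");
 *
 * which is expected to expand to "net-pf-16-proto-16-family-foo",
 * matching the format string used above (PF_NETLINK and
 * NETLINK_GENERIC are both 16).
 */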

static int genl_ctrl_event(int event, struct genl_family *family,
                           const struct genl_multicast_group *grp,
                           int grp_id)
{
        struct sk_buff *msg;

        /* genl is still initialising */
        if (!init_net.genl_sock)
                return 0;

        switch (event) {
        case CTRL_CMD_NEWFAMILY:
        case CTRL_CMD_DELFAMILY:
                WARN_ON(grp);
                msg = ctrl_build_family_msg(family, 0, 0, event);
                break;
        case CTRL_CMD_NEWMCAST_GRP:
        case CTRL_CMD_DELMCAST_GRP:
                BUG_ON(!grp);
                msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
                break;
        default:
                return -EINVAL;
        }

        if (IS_ERR(msg))
                return PTR_ERR(msg);

        if (!family->netnsok) {
                genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
                                        0, GFP_KERNEL);
        } else {
                rcu_read_lock();
                genlmsg_multicast_allns(&genl_ctrl, msg, 0,
                                        0, GFP_ATOMIC);
                rcu_read_unlock();
        }

        return 0;
}

static struct genl_ops genl_ctrl_ops[] = {
        {
                .cmd            = CTRL_CMD_GETFAMILY,
                .doit           = ctrl_getfamily,
                .dumpit         = ctrl_dumpfamily,
                .policy         = ctrl_policy,
        },
};

static struct genl_multicast_group genl_ctrl_groups[] = {
        { .name = "notify", },
};

static int __net_init genl_pernet_init(struct net *net)
{
        struct netlink_kernel_cfg cfg = {
                .input          = genl_rcv,
                .flags          = NL_CFG_F_NONROOT_RECV,
        };

        /* we'll bump the group number right afterwards */
        net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);

        if (!net->genl_sock && net_eq(net, &init_net))
                panic("GENL: Cannot initialize generic netlink\n");

        if (!net->genl_sock)
                return -ENOMEM;

        return 0;
}

static void __net_exit genl_pernet_exit(struct net *net)
{
        netlink_kernel_release(net->genl_sock);
        net->genl_sock = NULL;
}

static struct pernet_operations genl_pernet_ops = {
        .init = genl_pernet_init,
        .exit = genl_pernet_exit,
};

static int __init genl_init(void)
{
        int i, err;

        for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
                INIT_LIST_HEAD(&family_ht[i]);

        err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
                                                   genl_ctrl_groups);
        if (err < 0)
                goto problem;

        err = register_pernet_subsys(&genl_pernet_ops);
        if (err)
                goto problem;

        return 0;

problem:
        panic("GENL: Cannot register controller: %d\n", err);
}

subsys_initcall(genl_init);

static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
                         gfp_t flags)
{
        struct sk_buff *tmp;
        struct net *net, *prev = NULL;
        int err;

        for_each_net_rcu(net) {
                if (prev) {
                        tmp = skb_clone(skb, flags);
                        if (!tmp) {
                                err = -ENOMEM;
                                goto error;
                        }
                        err = nlmsg_multicast(prev->genl_sock, tmp,
                                              portid, group, flags);
                        if (err)
                                goto error;
                }

                prev = net;
        }

        return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
error:
        kfree_skb(skb);
        return err;
}

int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
                            u32 portid, unsigned int group, gfp_t flags)
{
        if (WARN_ON_ONCE(group >= family->n_mcgrps))
                return -EINVAL;
        group = family->mcgrp_offset + group;
        return genlmsg_mcast(skb, portid, group, flags);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);

void genl_notify(struct genl_family *family,
                 struct sk_buff *skb, struct net *net, u32 portid, u32 group,
                 struct nlmsghdr *nlh, gfp_t flags)
{
        struct sock *sk = net->genl_sock;
        int report = 0;

        if (nlh)
                report = nlmsg_report(nlh);

        if (WARN_ON_ONCE(group >= family->n_mcgrps))
                return;
        group = family->mcgrp_offset + group;
        nlmsg_notify(sk, skb, portid, group, report, flags);
}
EXPORT_SYMBOL(genl_notify);
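
/*
 * Usage sketch: callers of genlmsg_multicast_allns() and genl_notify()
 * (and of the genlmsg_multicast()/genlmsg_multicast_netns() helpers in
 * <net/genetlink.h>) pass the 0-based index into their family's
 * mcgrps[] array; the family's mcgrp_offset is added here.  Sending a
 * notification to "foo_events", group 0 of the hypothetical family
 * from the earlier sketches, would look roughly like:
 *
 *      genlmsg_multicast(&foo_family, msg, 0, 0, GFP_KERNEL);
 */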