1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NETLINK Generic Netlink Family
4 *
5 * Authors: Jamal Hadi Salim
6 * Thomas Graf <tgraf@suug.ch>
7 * Johannes Berg <johannes@sipsolutions.net>
8 */
9
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/errno.h>
14 #include <linux/types.h>
15 #include <linux/socket.h>
16 #include <linux/string_helpers.h>
17 #include <linux/skbuff.h>
18 #include <linux/mutex.h>
19 #include <linux/bitmap.h>
20 #include <linux/rwsem.h>
21 #include <linux/idr.h>
22 #include <net/sock.h>
23 #include <net/genetlink.h>
24
25 #include "genetlink.h"
26
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
/* taken for writing when (un)registering families, for reading while
 * dispatching messages / freeing per-socket privs (see genl_release())
 */
static DECLARE_RWSEM(cb_lock);

/* count of generic netlink sockets currently being destructed;
 * genl_unregister_family() waits on the queue until it drops to zero
 * before freeing the family's per-socket private data
 */
atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
32
/* Take the global genl mutex (serializes non-parallel message handling). */
void genl_lock(void)
{
	mutex_lock(&genl_mutex);
}
EXPORT_SYMBOL(genl_lock);
38
/* Release the global genl mutex taken by genl_lock(). */
void genl_unlock(void)
{
	mutex_unlock(&genl_mutex);
}
EXPORT_SYMBOL(genl_unlock);
44
/* Take both cb_lock (write) and the genl mutex, in that order;
 * used around family (un)registration.
 */
static void genl_lock_all(void)
{
	down_write(&cb_lock);
	genl_lock();
}
50
/* Drop both locks taken by genl_lock_all(), in reverse order. */
static void genl_unlock_all(void)
{
	genl_unlock();
	up_write(&cb_lock);
}
56
/* Take the genl mutex for op dispatch, unless the family opted into
 * parallel processing (parallel_ops) and does its own locking.
 */
static void genl_op_lock(const struct genl_family *family)
{
	if (!family->parallel_ops)
		genl_lock();
}
62
/* Counterpart of genl_op_lock(): no-op for parallel_ops families. */
static void genl_op_unlock(const struct genl_family *family)
{
	if (!family->parallel_ops)
		genl_unlock();
}
68
static DEFINE_IDR(genl_fam_idr); /* family ID -> struct genl_family * */
70
71 /*
72 * Bitmap of multicast groups that are currently in use.
73 *
74 * To avoid an allocation at boot of just one unsigned long,
75 * declare it global instead.
76 * Bit 0 is marked as already used since group 0 is invalid.
77 * Bit 1 is marked as already used since the drop-monitor code
78 * abuses the API and thinks it can statically use group 1.
79 * That group will typically conflict with other groups that
80 * any proper users use.
81 * Bit 16 is marked as used since it's used for generic netlink
82 * and the code no longer marks pre-reserved IDs as used.
83 * Bit 17 is marked as already used since the VFS quota code
84 * also abused this API and relied on family == group ID, we
85 * cater to that by giving it a static family and group ID.
86 * Bit 18 is marked as already used since the PMCRAID driver
87 * did the same thing as the VFS quota code (maybe copied?)
88 */
static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
				      BIT(GENL_ID_VFS_DQUOT) |
				      BIT(GENL_ID_PMCRAID);
/* points at the single static long above until more group IDs are
 * needed, then switches to a heap-allocated bitmap (see
 * genl_allocate_reserve_groups())
 */
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1; /* current bitmap size in longs */
94
/* Policy that rejects every attribute; assigned to commands at or past
 * resv_start_op that declare no policy of their own.
 * We need the last attribute with non-zero ID therefore a 2-entry array.
 */
static struct nla_policy genl_policy_reject_all[] = {
	{ .type = NLA_REJECT },
	{ .type = NLA_REJECT },
};
100
/* forward declaration: used by (un)registration paths below to emit
 * ctrl notifications; definition is not in this chunk
 */
static int genl_ctrl_event(int event, const struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);
104
105 static void
genl_op_fill_in_reject_policy(const struct genl_family * family,struct genl_ops * op)106 genl_op_fill_in_reject_policy(const struct genl_family *family,
107 struct genl_ops *op)
108 {
109 BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);
110
111 if (op->policy || op->cmd < family->resv_start_op)
112 return;
113
114 op->policy = genl_policy_reject_all;
115 op->maxattr = 1;
116 }
117
118 static void
genl_op_fill_in_reject_policy_split(const struct genl_family * family,struct genl_split_ops * op)119 genl_op_fill_in_reject_policy_split(const struct genl_family *family,
120 struct genl_split_ops *op)
121 {
122 if (op->policy)
123 return;
124
125 op->policy = genl_policy_reject_all;
126 op->maxattr = 1;
127 }
128
/* Look up a registered family by numeric ID; NULL if not registered. */
static const struct genl_family *genl_family_find_byid(unsigned int id)
{
	return idr_find(&genl_fam_idr, id);
}
133
genl_family_find_byname(char * name)134 static const struct genl_family *genl_family_find_byname(char *name)
135 {
136 const struct genl_family *family;
137 unsigned int id;
138
139 idr_for_each_entry(&genl_fam_idr, family, id)
140 if (strcmp(family->name, name) == 0)
141 return family;
142
143 return NULL;
144 }
145
/* Cursor for walking all commands of a family (full, small and split
 * op tables) as doit/dumpit pairs; see genl_op_iter_next().
 */
struct genl_op_iter {
	const struct genl_family *family;
	struct genl_split_ops doit;	/* zeroed if current cmd has no doit */
	struct genl_split_ops dumpit;	/* zeroed if current cmd has no dumpit */
	int cmd_idx;	/* number of commands returned so far */
	int entry_idx;	/* raw index across the three op arrays */
	u32 cmd;	/* command ID of the current entry */
	u8 flags;	/* combined doit/dumpit flags of the current entry */
};
155
/* Copy family->ops[i] into @op, filling in family-wide defaults for
 * maxattr/policy and the reject-all policy where applicable.
 */
static void genl_op_from_full(const struct genl_family *family,
			      unsigned int i, struct genl_ops *op)
{
	*op = family->ops[i];

	/* per-op settings take precedence over family-wide defaults */
	if (!op->maxattr)
		op->maxattr = family->maxattr;
	if (!op->policy)
		op->policy = family->policy;

	genl_op_fill_in_reject_policy(family, op);
}
168
/*
 * Find the full-form op for @cmd and expand it into @op.
 * Returns 0 on success, -ENOENT if the family has no such full op.
 */
static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
			     struct genl_ops *op)
{
	int idx;

	for (idx = 0; idx < family->n_ops; idx++) {
		if (family->ops[idx].cmd != cmd)
			continue;
		genl_op_from_full(family, idx, op);
		return 0;
	}

	return -ENOENT;
}
182
genl_op_from_small(const struct genl_family * family,unsigned int i,struct genl_ops * op)183 static void genl_op_from_small(const struct genl_family *family,
184 unsigned int i, struct genl_ops *op)
185 {
186 memset(op, 0, sizeof(*op));
187 op->doit = family->small_ops[i].doit;
188 op->dumpit = family->small_ops[i].dumpit;
189 op->cmd = family->small_ops[i].cmd;
190 op->internal_flags = family->small_ops[i].internal_flags;
191 op->flags = family->small_ops[i].flags;
192 op->validate = family->small_ops[i].validate;
193
194 op->maxattr = family->maxattr;
195 op->policy = family->policy;
196
197 genl_op_fill_in_reject_policy(family, op);
198 }
199
/*
 * Find the small-form op for @cmd and expand it into @op.
 * Returns 0 on success, -ENOENT if the family has no such small op.
 */
static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
			      struct genl_ops *op)
{
	int idx;

	for (idx = 0; idx < family->n_small_ops; idx++) {
		if (family->small_ops[idx].cmd != cmd)
			continue;
		genl_op_from_small(family, idx, op);
		return 0;
	}

	return -ENOENT;
}
213
/* Load the split op(s) at the iterator's current position into
 * iter->doit/dumpit. One command may occupy a single entry (DO or
 * DUMP) or two adjacent entries (a DO immediately followed by a DUMP
 * with the same cmd); entry_idx is advanced by the number of entries
 * consumed.
 */
static void genl_op_from_split(struct genl_op_iter *iter)
{
	const struct genl_family *family = iter->family;
	int i, cnt = 0;

	/* entry_idx counts across all three op arrays; rebase to split_ops */
	i = iter->entry_idx - family->n_ops - family->n_small_ops;

	if (family->split_ops[i + cnt].flags & GENL_CMD_CAP_DO) {
		iter->doit = family->split_ops[i + cnt];
		genl_op_fill_in_reject_policy_split(family, &iter->doit);
		cnt++;
	} else {
		memset(&iter->doit, 0, sizeof(iter->doit));
	}

	/* a DUMP entry belongs to this command if it is the first entry,
	 * or directly follows a DO entry with the same cmd
	 */
	if (i + cnt < family->n_split_ops &&
	    family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP &&
	    (!cnt || family->split_ops[i + cnt].cmd == iter->doit.cmd)) {
		iter->dumpit = family->split_ops[i + cnt];
		genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
		cnt++;
	} else {
		memset(&iter->dumpit, 0, sizeof(iter->dumpit));
	}

	/* validated tables guarantee every entry is DO and/or DUMP */
	WARN_ON(!cnt);
	iter->entry_idx += cnt;
}
242
243 static int
genl_get_cmd_split(u32 cmd,u8 flag,const struct genl_family * family,struct genl_split_ops * op)244 genl_get_cmd_split(u32 cmd, u8 flag, const struct genl_family *family,
245 struct genl_split_ops *op)
246 {
247 int i;
248
249 for (i = 0; i < family->n_split_ops; i++)
250 if (family->split_ops[i].cmd == cmd &&
251 family->split_ops[i].flags & flag) {
252 *op = family->split_ops[i];
253 return 0;
254 }
255
256 return -ENOENT;
257 }
258
/* Convert a legacy (full/small) op into split form for the requested
 * capability (@flags is GENL_CMD_CAP_DO or GENL_CMD_CAP_DUMP).
 * Returns 0 with *op filled in, or -ENOENT with *op zeroed when the
 * legacy op lacks the requested callback.
 */
static int
genl_cmd_full_to_split(struct genl_split_ops *op,
		       const struct genl_family *family,
		       const struct genl_ops *full, u8 flags)
{
	if ((flags & GENL_CMD_CAP_DO && !full->doit) ||
	    (flags & GENL_CMD_CAP_DUMP && !full->dumpit)) {
		memset(op, 0, sizeof(*op));
		return -ENOENT;
	}

	if (flags & GENL_CMD_CAP_DUMP) {
		op->start = full->start;
		op->dumpit = full->dumpit;
		op->done = full->done;
	} else {
		/* family-wide pre/post hooks wrap only the doit path */
		op->pre_doit = family->pre_doit;
		op->doit = full->doit;
		op->post_doit = family->post_doit;
	}

	if (flags & GENL_CMD_CAP_DUMP &&
	    full->validate & GENL_DONT_VALIDATE_DUMP) {
		/* no attribute parsing at all for this dump */
		op->policy = NULL;
		op->maxattr = 0;
	} else {
		op->policy = full->policy;
		op->maxattr = full->maxattr;
	}

	op->cmd = full->cmd;
	op->internal_flags = full->internal_flags;
	op->flags = full->flags;
	op->validate = full->validate;

	/* Make sure flags include the GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP */
	op->flags |= flags;

	return 0;
}
299
300 /* Must make sure that op is initialized to 0 on failure */
301 static int
genl_get_cmd(u32 cmd,u8 flags,const struct genl_family * family,struct genl_split_ops * op)302 genl_get_cmd(u32 cmd, u8 flags, const struct genl_family *family,
303 struct genl_split_ops *op)
304 {
305 struct genl_ops full;
306 int err;
307
308 err = genl_get_cmd_full(cmd, family, &full);
309 if (err == -ENOENT)
310 err = genl_get_cmd_small(cmd, family, &full);
311 /* Found one of legacy forms */
312 if (err == 0)
313 return genl_cmd_full_to_split(op, family, &full, flags);
314
315 err = genl_get_cmd_split(cmd, flags, family, op);
316 if (err)
317 memset(op, 0, sizeof(*op));
318 return err;
319 }
320
321 /* For policy dumping only, get ops of both do and dump.
322 * Fail if both are missing, genl_get_cmd() will zero-init in case of failure.
323 */
324 static int
genl_get_cmd_both(u32 cmd,const struct genl_family * family,struct genl_split_ops * doit,struct genl_split_ops * dumpit)325 genl_get_cmd_both(u32 cmd, const struct genl_family *family,
326 struct genl_split_ops *doit, struct genl_split_ops *dumpit)
327 {
328 int err1, err2;
329
330 err1 = genl_get_cmd(cmd, GENL_CMD_CAP_DO, family, doit);
331 err2 = genl_get_cmd(cmd, GENL_CMD_CAP_DUMP, family, dumpit);
332
333 return err1 && err2 ? -ENOENT : 0;
334 }
335
336 static bool
genl_op_iter_init(const struct genl_family * family,struct genl_op_iter * iter)337 genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
338 {
339 iter->family = family;
340 iter->cmd_idx = 0;
341 iter->entry_idx = 0;
342
343 iter->flags = 0;
344
345 return iter->family->n_ops +
346 iter->family->n_small_ops +
347 iter->family->n_split_ops;
348 }
349
/* Advance @iter to the next command, filling iter->doit/dumpit (each
 * zeroed when the command lacks that form) plus the combined cmd and
 * flags. Returns false once all ops have been walked.
 */
static bool genl_op_iter_next(struct genl_op_iter *iter)
{
	const struct genl_family *family = iter->family;
	bool legacy_op = true;
	struct genl_ops op;

	/* entry_idx runs over ops, then small_ops, then split_ops */
	if (iter->entry_idx < family->n_ops) {
		genl_op_from_full(family, iter->entry_idx, &op);
	} else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
		genl_op_from_small(family, iter->entry_idx - family->n_ops,
				   &op);
	} else if (iter->entry_idx <
		   family->n_ops + family->n_small_ops + family->n_split_ops) {
		legacy_op = false;
		/* updates entry_idx */
		genl_op_from_split(iter);
	} else {
		return false;
	}

	iter->cmd_idx++;

	if (legacy_op) {
		iter->entry_idx++;

		/* legacy forms carry doit and dumpit in one struct; split
		 * them into the iterator's two slots
		 */
		genl_cmd_full_to_split(&iter->doit, family,
				       &op, GENL_CMD_CAP_DO);
		genl_cmd_full_to_split(&iter->dumpit, family,
				       &op, GENL_CMD_CAP_DUMP);
	}

	/* a missing form is all-zero, so OR yields the present values */
	iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
	iter->flags = iter->doit.flags | iter->dumpit.flags;

	return true;
}
386
/* Duplicate an iterator (used to compare an op against all later ones). */
static void
genl_op_iter_copy(struct genl_op_iter *dst, struct genl_op_iter *src)
{
	*dst = *src;
}
392
/* Number of commands the iterator has returned so far. */
static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
{
	return iter->cmd_idx;
}
397
/* Reserve @n_groups consecutive free multicast group IDs in the global
 * mc_groups bitmap, growing the bitmap on demand.
 * On success stores the first reserved ID in @first_id and returns 0;
 * returns -ENOMEM if the bitmap cannot be (re)allocated.
 */
static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
	unsigned long *new_groups;
	int start = 0;
	int i;
	int id;
	bool fits;

	do {
		if (start == 0)
			id = find_first_zero_bit(mc_groups,
						 mc_groups_longs *
						 BITS_PER_LONG);
		else
			id = find_next_zero_bit(mc_groups,
						mc_groups_longs * BITS_PER_LONG,
						start);

		/* check that all n_groups bits from id (clamped to the
		 * current bitmap size) are free; restart the search after
		 * the first used bit otherwise
		 */
		fits = true;
		for (i = id;
		     i < min_t(int, id + n_groups,
			       mc_groups_longs * BITS_PER_LONG);
		     i++) {
			if (test_bit(i, mc_groups)) {
				start = i;
				fits = false;
				break;
			}
		}

		/* the candidate range runs past the bitmap: grow it (the
		 * new tail is all-zero, so a fitting range stays fitting)
		 */
		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
			unsigned long new_longs = mc_groups_longs +
						  BITS_TO_LONGS(n_groups);
			size_t nlen = new_longs * sizeof(unsigned long);

			if (mc_groups == &mc_group_start) {
				/* first growth: move off the static
				 * single-long bitmap onto the heap
				 */
				new_groups = kzalloc(nlen, GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				*mc_groups = mc_group_start;
			} else {
				new_groups = krealloc(mc_groups, nlen,
						      GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				/* krealloc does not zero the new tail */
				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
					mc_groups[mc_groups_longs + i] = 0;
			}
			mc_groups_longs = new_longs;
		}
	} while (!fits);

	for (i = id; i < id + n_groups; i++)
		set_bit(i, mc_groups);
	*first_id = id;
	return 0;
}
457
458 static struct genl_family genl_ctrl;
459
/* Validate a family's multicast group names, assign the family a
 * contiguous block of group IDs (family->mcgrp_offset) and grow the
 * netlink sockets' group bitmaps to cover the new IDs.
 * Returns 0 on success or a negative error code.
 */
static int genl_validate_assign_mc_groups(struct genl_family *family)
{
	int first_id;
	int n_groups = family->n_mcgrps;
	int err = 0, i;
	bool groups_allocated = false;

	if (!n_groups)
		return 0;

	for (i = 0; i < n_groups; i++) {
		const struct genl_multicast_group *grp = &family->mcgrps[i];

		/* names must be non-empty and NUL-terminated within
		 * GENL_NAMSIZ
		 */
		if (WARN_ON(grp->name[0] == '\0'))
			return -EINVAL;
		if (WARN_ON(!string_is_terminated(grp->name, GENL_NAMSIZ)))
			return -EINVAL;
	}

	/* special-case our own group and hacks */
	if (family == &genl_ctrl) {
		first_id = GENL_ID_CTRL;
		BUG_ON(n_groups != 1);
	} else if (strcmp(family->name, "NET_DM") == 0) {
		first_id = 1;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_VFS_DQUOT) {
		first_id = GENL_ID_VFS_DQUOT;
		BUG_ON(n_groups != 1);
	} else if (family->id == GENL_ID_PMCRAID) {
		first_id = GENL_ID_PMCRAID;
		BUG_ON(n_groups != 1);
	} else {
		groups_allocated = true;
		err = genl_allocate_reserve_groups(n_groups, &first_id);
		if (err)
			return err;
	}

	family->mcgrp_offset = first_id;

	/* if still initializing, can't and don't need to realloc bitmaps */
	if (!init_net.genl_sock)
		return 0;

	if (family->netnsok) {
		struct net *net;

		netlink_table_grab();
		rcu_read_lock();
		for_each_net_rcu(net) {
			err = __netlink_change_ngroups(net->genl_sock,
					mc_groups_longs * BITS_PER_LONG);
			if (err) {
				/*
				 * No need to roll back, can only fail if
				 * memory allocation fails and then the
				 * number of _possible_ groups has been
				 * increased on some sockets which is ok.
				 */
				break;
			}
		}
		rcu_read_unlock();
		netlink_table_ungrab();
	} else {
		err = netlink_change_ngroups(init_net.genl_sock,
					     mc_groups_longs * BITS_PER_LONG);
	}

	/* release the bitmap reservation if growing the sockets failed;
	 * the hard-coded special-case IDs stay reserved
	 */
	if (groups_allocated && err) {
		for (i = 0; i < family->n_mcgrps; i++)
			clear_bit(family->mcgrp_offset + i, mc_groups);
	}

	return err;
}
537
/* Kick all subscribers off the family's multicast groups in every
 * netns, release the group IDs in the bitmap and send a DELMCAST_GRP
 * ctrl notification per group.
 */
static void genl_unregister_mc_groups(const struct genl_family *family)
{
	struct net *net;
	int i;

	netlink_table_grab();
	rcu_read_lock();
	for_each_net_rcu(net) {
		for (i = 0; i < family->n_mcgrps; i++)
			__netlink_clear_multicast_users(
				net->genl_sock, family->mcgrp_offset + i);
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	for (i = 0; i < family->n_mcgrps; i++) {
		int grp_id = family->mcgrp_offset + i;

		/* group 1 stays permanently reserved in the bitmap (see
		 * the mc_group_start comment about the drop-monitor hack)
		 */
		if (grp_id != 1)
			clear_bit(grp_id, mc_groups);
		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
				&family->mcgrps[i], grp_id);
	}
}
562
genl_split_op_check(const struct genl_split_ops * op)563 static bool genl_split_op_check(const struct genl_split_ops *op)
564 {
565 if (WARN_ON(hweight8(op->flags & (GENL_CMD_CAP_DO |
566 GENL_CMD_CAP_DUMP)) != 1))
567 return true;
568 return false;
569 }
570
/* Sanity-check a family's op tables at registration time:
 *  - op arrays must be non-NULL whenever their counts are non-zero
 *  - every command needs a doit and/or a dumpit
 *  - commands at/past resv_start_op may not use legacy ->validate flags
 *  - command IDs must be unique across all three tables
 *  - split_ops must be sorted by cmd, each entry exactly DO or DUMP,
 *    and a repeated cmd is only allowed as a DO entry followed by a
 *    DUMP entry with matching flags
 * Returns 0 if valid, -EINVAL otherwise.
 */
static int genl_validate_ops(const struct genl_family *family)
{
	struct genl_op_iter i, j;
	unsigned int s;

	if (WARN_ON(family->n_ops && !family->ops) ||
	    WARN_ON(family->n_small_ops && !family->small_ops) ||
	    WARN_ON(family->n_split_ops && !family->split_ops))
		return -EINVAL;

	for (genl_op_iter_init(family, &i); genl_op_iter_next(&i); ) {
		if (!(i.flags & (GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP)))
			return -EINVAL;

		if (WARN_ON(i.cmd >= family->resv_start_op &&
			    (i.doit.validate || i.dumpit.validate)))
			return -EINVAL;

		/* compare against every later command to catch duplicates */
		genl_op_iter_copy(&j, &i);
		while (genl_op_iter_next(&j)) {
			if (i.cmd == j.cmd)
				return -EINVAL;
		}
	}

	if (family->n_split_ops) {
		if (genl_split_op_check(&family->split_ops[0]))
			return -EINVAL;
	}

	/* pairwise checks over adjacent split entries */
	for (s = 1; s < family->n_split_ops; s++) {
		const struct genl_split_ops *a, *b;

		a = &family->split_ops[s - 1];
		b = &family->split_ops[s];

		if (genl_split_op_check(b))
			return -EINVAL;

		/* Check sort order */
		if (a->cmd < b->cmd) {
			continue;
		} else if (a->cmd > b->cmd) {
			WARN_ON(1);
			return -EINVAL;
		}

		/* same cmd twice: both entries must agree on everything
		 * except the DO/DUMP capability bits
		 */
		if (a->internal_flags != b->internal_flags ||
		    ((a->flags ^ b->flags) & ~(GENL_CMD_CAP_DO |
					       GENL_CMD_CAP_DUMP))) {
			WARN_ON(1);
			return -EINVAL;
		}

		if ((a->flags & GENL_CMD_CAP_DO) &&
		    (b->flags & GENL_CMD_CAP_DUMP))
			continue;

		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
635
genl_sk_priv_alloc(struct genl_family * family)636 static void *genl_sk_priv_alloc(struct genl_family *family)
637 {
638 void *priv;
639
640 priv = kzalloc(family->sock_priv_size, GFP_KERNEL);
641 if (!priv)
642 return ERR_PTR(-ENOMEM);
643
644 if (family->sock_priv_init)
645 family->sock_priv_init(priv);
646
647 return priv;
648 }
649
/* Free one per-socket private area, running the family's destroy hook
 * first if it has one.
 */
static void genl_sk_priv_free(const struct genl_family *family, void *priv)
{
	if (family->sock_priv_destroy)
		family->sock_priv_destroy(priv);
	kfree(priv);
}
656
/* Allocate and initialize the family's sock -> priv xarray; a no-op
 * for families that declare no per-socket private data.
 */
static int genl_sk_privs_alloc(struct genl_family *family)
{
	if (!family->sock_priv_size)
		return 0;

	family->sock_privs = kzalloc_obj(*family->sock_privs);
	if (!family->sock_privs)
		return -ENOMEM;
	xa_init(family->sock_privs);
	return 0;
}
668
/* Free every remaining per-socket priv of the family, then the xarray
 * itself; counterpart of genl_sk_privs_alloc().
 */
static void genl_sk_privs_free(const struct genl_family *family)
{
	unsigned long id;
	void *priv;

	if (!family->sock_priv_size)
		return;

	xa_for_each(family->sock_privs, id, priv)
		genl_sk_priv_free(family, priv);

	xa_destroy(family->sock_privs);
	kfree(family->sock_privs);
}
683
genl_sk_priv_free_by_sock(struct genl_family * family,struct sock * sk)684 static void genl_sk_priv_free_by_sock(struct genl_family *family,
685 struct sock *sk)
686 {
687 void *priv;
688
689 if (!family->sock_priv_size)
690 return;
691 priv = xa_erase(family->sock_privs, (unsigned long) sk);
692 if (!priv)
693 return;
694 genl_sk_priv_free(family, priv);
695 }
696
/* Free any per-family private data attached to @sk across all
 * registered families; @groups is unused here.
 * NOTE(review): presumably invoked when a generic netlink socket is
 * released - the registration site is not in this chunk.
 */
static void genl_release(struct sock *sk, unsigned long *groups)
{
	struct genl_family *family;
	unsigned int id;

	/* hold cb_lock for reading so the family IDR stays stable */
	down_read(&cb_lock);

	idr_for_each_entry(&genl_fam_idr, family, id)
		genl_sk_priv_free_by_sock(family, sk);

	up_read(&cb_lock);
}
709
710 /**
711 * __genl_sk_priv_get - Get family private pointer for socket, if exists
712 *
713 * @family: family
714 * @sk: socket
715 *
716 * Lookup a private memory for a Generic netlink family and specified socket.
717 *
718 * Caller should make sure this is called in RCU read locked section.
719 *
720 * Return: valid pointer on success, otherwise negative error value
721 * encoded by ERR_PTR(), NULL in case priv does not exist.
722 */
void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk)
{
	/* family never allocated a priv xarray (no sock_priv_size) */
	if (WARN_ON_ONCE(!family->sock_privs))
		return ERR_PTR(-EINVAL);
	return xa_load(family->sock_privs, (unsigned long) sk);
}
729
730 /**
731 * genl_sk_priv_get - Get family private pointer for socket
732 *
733 * @family: family
734 * @sk: socket
735 *
736 * Lookup a private memory for a Generic netlink family and specified socket.
737 * Allocate the private memory in case it was not already done.
738 *
739 * Return: valid pointer on success, otherwise negative error value
740 * encoded by ERR_PTR().
741 */
void *genl_sk_priv_get(struct genl_family *family, struct sock *sk)
{
	void *priv, *old_priv;

	priv = __genl_sk_priv_get(family, sk);
	if (priv)
		return priv;	/* existing priv, or the ERR_PTR case */

	/* priv for the family does not exist so far, create it. */

	priv = genl_sk_priv_alloc(family);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	/* insert only if still absent, so a concurrent allocation for
	 * the same socket is never overwritten
	 */
	old_priv = xa_cmpxchg(family->sock_privs, (unsigned long) sk, NULL,
			      priv, GFP_KERNEL);
	if (old_priv) {
		/* our copy lost - free it and use/report what's there */
		genl_sk_priv_free(family, priv);
		if (xa_is_err(old_priv))
			return ERR_PTR(xa_err(old_priv));
		/* Race happened, priv for the socket was already inserted. */
		return old_priv;
	}
	return priv;
}
767
768 /**
769 * genl_register_family - register a generic netlink family
770 * @family: generic netlink family
771 *
772 * Registers the specified family after validating it first. Only one
773 * family may be registered with the same family name or identifier.
774 *
775 * The family's ops, multicast groups and module pointer must already
776 * be assigned.
777 *
778 * Return 0 on success or a negative error code.
779 */
int genl_register_family(struct genl_family *family)
{
	int err, i;
	int start = GENL_START_ALLOC, end = GENL_MAX_ID;

	/* validate before taking any locks - pure table checks */
	err = genl_validate_ops(family);
	if (err)
		return err;

	genl_lock_all();

	if (genl_family_find_byname(family->name)) {
		err = -EEXIST;
		goto errout_locked;
	}

	err = genl_sk_privs_alloc(family);
	if (err)
		goto errout_locked;

	/*
	 * Sadly, a few cases need to be special-cased
	 * due to them having previously abused the API
	 * and having used their family ID also as their
	 * multicast group ID, so we use reserved IDs
	 * for both to be sure we can do that mapping.
	 */
	if (family == &genl_ctrl) {
		/* and this needs to be special for initial family lookups */
		start = end = GENL_ID_CTRL;
	} else if (strcmp(family->name, "pmcraid") == 0) {
		start = end = GENL_ID_PMCRAID;
	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
		start = end = GENL_ID_VFS_DQUOT;
	}

	family->id = idr_alloc_cyclic(&genl_fam_idr, family,
				      start, end + 1, GFP_KERNEL);
	if (family->id < 0) {
		err = family->id;
		goto errout_sk_privs_free;
	}

	err = genl_validate_assign_mc_groups(family);
	if (err)
		goto errout_remove;

	/* drop locks before notifying: events go through netlink I/O */
	genl_unlock_all();

	/* send all events */
	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
	for (i = 0; i < family->n_mcgrps; i++)
		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
				&family->mcgrps[i], family->mcgrp_offset + i);

	return 0;

	/* unwind in reverse order of acquisition */
errout_remove:
	idr_remove(&genl_fam_idr, family->id);
errout_sk_privs_free:
	genl_sk_privs_free(family);
errout_locked:
	genl_unlock_all();
	return err;
}
EXPORT_SYMBOL(genl_register_family);
846
847 /**
848 * genl_unregister_family - unregister generic netlink family
849 * @family: generic netlink family
850 *
851 * Unregisters the specified family.
852 *
853 * Returns 0 on success or a negative error code.
854 */
int genl_unregister_family(const struct genl_family *family)
{
	genl_lock_all();

	if (!genl_family_find_byid(family->id)) {
		genl_unlock_all();
		return -ENOENT;
	}

	genl_unregister_mc_groups(family);

	idr_remove(&genl_fam_idr, family->id);

	/* release only cb_lock here; the genl mutex stays held until
	 * after the per-socket privs are freed
	 */
	up_write(&cb_lock);
	/* wait for all in-flight socket destructions before freeing
	 * the privs they may still be touching
	 */
	wait_event(genl_sk_destructing_waitq,
		   atomic_read(&genl_sk_destructing_cnt) == 0);

	genl_sk_privs_free(family);

	genl_unlock();

	genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);

	return 0;
}
EXPORT_SYMBOL(genl_unregister_family);
881
882 /**
883 * genlmsg_put - Add generic netlink header to netlink message
884 * @skb: socket buffer holding the message
885 * @portid: netlink portid the message is addressed to
886 * @seq: sequence number (usually the one of the sender)
887 * @family: generic netlink family
888 * @flags: netlink message flags
889 * @cmd: generic netlink command
890 *
891 * Returns pointer to user specific header
892 */
/* See the kernel-doc comment above: append nlmsghdr + genlmsghdr +
 * the family's user header and return a pointer to the user header.
 */
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
		  const struct genl_family *family, int flags, u8 cmd)
{
	struct genlmsghdr *ghdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, family->id,
			GENL_HDRLEN + family->hdrsize, flags);
	if (!nlh)
		return NULL;	/* not enough tailroom in skb */

	ghdr = nlmsg_data(nlh);
	ghdr->cmd = cmd;
	ghdr->version = family->version;
	ghdr->reserved = 0;

	return (char *)ghdr + GENL_HDRLEN;
}
EXPORT_SYMBOL(genlmsg_put);
912
/* Allocate the per-dump bookkeeping; freed via genl_dumpit_info_free(). */
static struct genl_dumpit_info *genl_dumpit_info_alloc(void)
{
	return kmalloc_obj(struct genl_dumpit_info);
}
917
/* Counterpart of genl_dumpit_info_alloc(). */
static void genl_dumpit_info_free(const struct genl_dumpit_info *info)
{
	kfree(info);
}
922
/* Parse the message's attributes for @ops into a freshly allocated
 * array of ops->maxattr + 1 nlattr pointers. Validation drops to
 * liberal when ops->validate has @no_strict_flag set.
 * Returns NULL when the op takes no attributes, an ERR_PTR on failure,
 * otherwise the array - release with genl_family_rcv_msg_attrs_free().
 */
static struct nlattr **
genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
				struct nlmsghdr *nlh,
				struct netlink_ext_ack *extack,
				const struct genl_split_ops *ops,
				int hdrlen,
				enum genl_validate_flags no_strict_flag)
{
	enum netlink_validation validate = ops->validate & no_strict_flag ?
					   NL_VALIDATE_LIBERAL :
					   NL_VALIDATE_STRICT;
	struct nlattr **attrbuf;
	int err;

	if (!ops->maxattr)
		return NULL;

	attrbuf = kmalloc_objs(struct nlattr *, ops->maxattr + 1);
	if (!attrbuf)
		return ERR_PTR(-ENOMEM);

	err = __nlmsg_parse(nlh, hdrlen, attrbuf, ops->maxattr, ops->policy,
			    validate, extack);
	if (err) {
		kfree(attrbuf);
		return ERR_PTR(err);
	}
	return attrbuf;
}
952
/* Free an attribute array from genl_family_rcv_msg_attrs_parse()
 * (NULL and ERR_PTR-free callers pass the plain pointer).
 */
static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
{
	kfree(attrbuf);
}
957
/* Arguments handed from genl_family_rcv_msg_dumpit() to genl_start()
 * via netlink_dump_control.data; only lives until the dump is started.
 */
struct genl_start_context {
	const struct genl_family *family;
	struct nlmsghdr *nlh;
	struct netlink_ext_ack *extack;
	const struct genl_split_ops *ops;
	int hdrlen;	/* header length to skip when parsing attributes */
};
965
/* netlink_dump_control.start callback: validate and parse the request,
 * allocate the genl_dumpit_info that lives for the whole dump, and run
 * the op's own ->start() if it has one.
 */
static int genl_start(struct netlink_callback *cb)
{
	struct genl_start_context *ctx = cb->data;
	const struct genl_split_ops *ops;
	struct genl_dumpit_info *info;
	struct nlattr **attrs = NULL;
	int rc = 0;

	ops = ctx->ops;
	if (!(ops->validate & GENL_DONT_VALIDATE_DUMP) &&
	    ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
		return -EINVAL;

	attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
						ops, ctx->hdrlen,
						GENL_DONT_VALIDATE_DUMP_STRICT);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	info = genl_dumpit_info_alloc();
	if (!info) {
		genl_family_rcv_msg_attrs_free(attrs);
		return -ENOMEM;
	}
	/* copy the op: ctx is on the caller's stack and won't outlive
	 * this call (see genl_family_rcv_msg_dumpit())
	 */
	info->op = *ops;
	info->info.family = ctx->family;
	info->info.snd_seq = cb->nlh->nlmsg_seq;
	info->info.snd_portid = NETLINK_CB(cb->skb).portid;
	info->info.nlhdr = cb->nlh;
	info->info.genlhdr = nlmsg_data(cb->nlh);
	info->info.attrs = attrs;
	genl_info_net_set(&info->info, sock_net(cb->skb->sk));
	info->info.extack = cb->extack;
	memset(&info->info.ctx, 0, sizeof(info->info.ctx));

	/* from here on cb->data carries the dump-lifetime info */
	cb->data = info;
	if (ops->start) {
		genl_op_lock(ctx->family);
		rc = ops->start(cb);
		genl_op_unlock(ctx->family);
	}

	if (rc) {
		/* failed start: undo the allocations made above */
		genl_family_rcv_msg_attrs_free(info->info.attrs);
		genl_dumpit_info_free(info);
		cb->data = NULL;
	}
	return rc;
}
1015
/* netlink_dump_control.dump callback: run the op's dumpit under the
 * family op lock (a no-op for parallel_ops families).
 */
static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct genl_dumpit_info *dump_info = cb->data;
	const struct genl_split_ops *ops = &dump_info->op;
	struct genl_info *info = &dump_info->info;
	int rc;

	/* refresh: each dump pass may come with a different extack */
	info->extack = cb->extack;

	genl_op_lock(info->family);
	rc = ops->dumpit(skb, cb);
	genl_op_unlock(info->family);
	return rc;
}
1030
/* netlink_dump_control.done callback: run the op's done hook, then
 * release the attribute array and dump info set up by genl_start().
 */
static int genl_done(struct netlink_callback *cb)
{
	struct genl_dumpit_info *dump_info = cb->data;
	const struct genl_split_ops *ops = &dump_info->op;
	struct genl_info *info = &dump_info->info;
	int rc = 0;

	info->extack = cb->extack;

	if (ops->done) {
		genl_op_lock(info->family);
		rc = ops->done(cb);
		genl_op_unlock(info->family);
	}
	genl_family_rcv_msg_attrs_free(info->attrs);
	genl_dumpit_info_free(dump_info);
	return rc;
}
1049
/* Kick off a netlink dump for @ops. The dump itself is driven by the
 * netlink core through the genl_start/genl_dumpit/genl_done callbacks;
 * ctx only needs to survive until __netlink_dump_start() returns,
 * because genl_start() copies everything it needs.
 */
static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
				      struct sk_buff *skb,
				      struct nlmsghdr *nlh,
				      struct netlink_ext_ack *extack,
				      const struct genl_split_ops *ops,
				      int hdrlen, struct net *net)
{
	struct genl_start_context ctx;
	struct netlink_dump_control c = {
		.module = family->module,
		.data = &ctx,
		.start = genl_start,
		.dump = genl_dumpit,
		.done = genl_done,
		.extack = extack,
	};
	int err;

	ctx.family = family;
	ctx.nlh = nlh;
	ctx.extack = extack;
	ctx.ops = ops;
	ctx.hdrlen = hdrlen;

	/* release the family op lock across the dump start; genl_start()
	 * re-takes it around the op's own start callback
	 */
	genl_op_unlock(family);
	err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
	genl_op_lock(family);

	return err;
}
1080
/* Execute the doit path of an op: parse attributes, build genl_info on
 * the stack and run pre_doit/doit/post_doit.
 * Returns the op's result or a negative parsing/pre_doit error.
 */
static int genl_family_rcv_msg_doit(const struct genl_family *family,
				    struct sk_buff *skb,
				    struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack,
				    const struct genl_split_ops *ops,
				    int hdrlen, struct net *net)
{
	struct nlattr **attrbuf;
	struct genl_info info;
	int err;

	attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
						  ops, hdrlen,
						  GENL_DONT_VALIDATE_STRICT);
	if (IS_ERR(attrbuf))
		return PTR_ERR(attrbuf);

	/* attrbuf may legitimately be NULL here (op takes no attributes) */
	info.snd_seq = nlh->nlmsg_seq;
	info.snd_portid = NETLINK_CB(skb).portid;
	info.family = family;
	info.nlhdr = nlh;
	info.genlhdr = nlmsg_data(nlh);
	info.attrs = attrbuf;
	info.extack = extack;
	genl_info_net_set(&info, net);
	memset(&info.ctx, 0, sizeof(info.ctx));

	if (ops->pre_doit) {
		err = ops->pre_doit(ops, skb, &info);
		if (err)
			goto out;
	}

	err = ops->doit(skb, &info);

	/* post_doit runs regardless of the doit result */
	if (ops->post_doit)
		ops->post_doit(ops, skb, &info);

out:
	genl_family_rcv_msg_attrs_free(attrbuf);

	return err;
}
1124
genl_header_check(const struct genl_family * family,struct nlmsghdr * nlh,struct genlmsghdr * hdr,struct netlink_ext_ack * extack)1125 static int genl_header_check(const struct genl_family *family,
1126 struct nlmsghdr *nlh, struct genlmsghdr *hdr,
1127 struct netlink_ext_ack *extack)
1128 {
1129 u16 flags;
1130
1131 /* Only for commands added after we started validating */
1132 if (hdr->cmd < family->resv_start_op)
1133 return 0;
1134
1135 if (hdr->reserved) {
1136 NL_SET_ERR_MSG(extack, "genlmsghdr.reserved field is not 0");
1137 return -EINVAL;
1138 }
1139
1140 /* Old netlink flags have pretty loose semantics, allow only the flags
1141 * consumed by the core where we can enforce the meaning.
1142 */
1143 flags = nlh->nlmsg_flags;
1144 if ((flags & NLM_F_DUMP) == NLM_F_DUMP) /* DUMP is 2 bits */
1145 flags &= ~NLM_F_DUMP;
1146 if (flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO)) {
1147 NL_SET_ERR_MSG(extack,
1148 "ambiguous or reserved bits set in nlmsg_flags");
1149 return -EINVAL;
1150 }
1151
1152 return 0;
1153 }
1154
/* Validate and dispatch one request addressed to @family.
 *
 * Checks netns visibility, minimum message length, header sanity,
 * resolves (cmd, DO|DUMP) to a split op, enforces the op's permission
 * flags, then routes to the doit or dumpit path.  Called from
 * genl_rcv_msg() with genl_op_lock() held for non-parallel families.
 *
 * Returns 0 or a negative errno.
 */
static int genl_family_rcv_msg(const struct genl_family *family,
			       struct sk_buff *skb,
			       struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct genlmsghdr *hdr = nlmsg_data(nlh);
	struct genl_split_ops op;
	int hdrlen;
	u8 flags;

	/* this family doesn't exist in this netns */
	if (!family->netnsok && !net_eq(net, &init_net))
		return -ENOENT;

	hdrlen = GENL_HDRLEN + family->hdrsize;
	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
		return -EINVAL;

	if (genl_header_check(family, nlh, hdr, extack))
		return -EINVAL;

	/* NLM_F_DUMP is two bits wide, so compare against the full mask */
	flags = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP ?
		GENL_CMD_CAP_DUMP : GENL_CMD_CAP_DO;
	if (genl_get_cmd(hdr->cmd, flags, family, &op))
		return -EOPNOTSUPP;

	/* GENL_ADMIN_PERM: CAP_NET_ADMIN in the initial user namespace */
	if ((op.flags & GENL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* GENL_UNS_ADMIN_PERM: CAP_NET_ADMIN in the netns' own user ns */
	if ((op.flags & GENL_UNS_ADMIN_PERM) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (flags & GENL_CMD_CAP_DUMP)
		return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
						  &op, hdrlen, net);
	else
		return genl_family_rcv_msg_doit(family, skb, nlh, extack,
						&op, hdrlen, net);
}
1197
genl_rcv_msg(struct sk_buff * skb,struct nlmsghdr * nlh,struct netlink_ext_ack * extack)1198 static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
1199 struct netlink_ext_ack *extack)
1200 {
1201 const struct genl_family *family;
1202 int err;
1203
1204 family = genl_family_find_byid(nlh->nlmsg_type);
1205 if (family == NULL)
1206 return -ENOENT;
1207
1208 genl_op_lock(family);
1209 err = genl_family_rcv_msg(family, skb, nlh, extack);
1210 genl_op_unlock(family);
1211
1212 return err;
1213 }
1214
/* Socket input function for NETLINK_GENERIC.
 *
 * Holds cb_lock for reading so that family registration/unregistration,
 * which takes cb_lock for writing (genl_lock_all()), cannot race with
 * in-flight message processing.
 */
static void genl_rcv(struct sk_buff *skb)
{
	down_read(&cb_lock);
	netlink_rcv_skb(skb, &genl_rcv_msg);
	up_read(&cb_lock);
}
1221
1222 /**************************************************************************
1223 * Controller
1224 **************************************************************************/
1225
1226 static struct genl_family genl_ctrl;
1227
/* Fill one CTRL_CMD_* message describing @family into @skb.
 *
 * Emits the family's name/id/version/hdrsize/maxattr, then an optional
 * nested list of its operations (CTRL_ATTR_OPS) and an optional nested
 * list of its multicast groups (CTRL_ATTR_MCAST_GROUPS).
 *
 * Returns 0 on success or -EMSGSIZE, cancelling the partial message.
 */
static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
			  u32 flags, struct sk_buff *skb, u8 cmd)
{
	struct genl_op_iter i;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		return -EMSGSIZE;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
		goto nla_put_failure;

	/* emit CTRL_ATTR_OPS only if the family has at least one op */
	if (genl_op_iter_init(family, &i)) {
		struct nlattr *nla_ops;

		nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
		if (nla_ops == NULL)
			goto nla_put_failure;

		while (genl_op_iter_next(&i)) {
			struct nlattr *nest;
			u32 op_flags;

			/* advertise HASPOL when either direction has a
			 * per-op policy attached
			 */
			op_flags = i.flags;
			if (i.doit.policy || i.dumpit.policy)
				op_flags |= GENL_CMD_CAP_HASPOL;

			/* nest type is the 1-based op index */
			nest = nla_nest_start_noflag(skb, genl_op_iter_idx(&i));
			if (nest == NULL)
				goto nla_put_failure;

			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, i.cmd) ||
			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}

		nla_nest_end(skb, nla_ops);
	}

	if (family->n_mcgrps) {
		struct nlattr *nla_grps;
		int i;	/* shadows the op iterator above, intentionally local */

		nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
		if (nla_grps == NULL)
			goto nla_put_failure;

		for (i = 0; i < family->n_mcgrps; i++) {
			struct nlattr *nest;
			const struct genl_multicast_group *grp;

			grp = &family->mcgrps[i];

			/* nest type is the 1-based group index */
			nest = nla_nest_start_noflag(skb, i + 1);
			if (nest == NULL)
				goto nla_put_failure;

			/* global group id = family's offset + local index */
			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
					family->mcgrp_offset + i) ||
			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
					   grp->name))
				goto nla_put_failure;

			nla_nest_end(skb, nest);
		}
		nla_nest_end(skb, nla_grps);
	}

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
1310
/* Fill one CTRL_CMD_{NEW,DEL}MCAST_GRP message describing a single
 * multicast group of @family into @skb.
 *
 * Emits the family name/id plus a one-entry CTRL_ATTR_MCAST_GROUPS nest
 * carrying the group's global id and name.
 *
 * Returns 0 on success or -EMSGSIZE, cancelling the partial message.
 */
static int ctrl_fill_mcgrp_info(const struct genl_family *family,
				const struct genl_multicast_group *grp,
				int grp_id, u32 portid, u32 seq, u32 flags,
				struct sk_buff *skb, u8 cmd)
{
	void *hdr;
	struct nlattr *nla_grps;
	struct nlattr *nest;

	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
	if (hdr == NULL)
		/* was "return -1" (== -EPERM); use -EMSGSIZE to match
		 * ctrl_fill_info() and report the real cause to callers
		 * that propagate it via ERR_PTR()
		 */
		return -EMSGSIZE;

	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
		goto nla_put_failure;

	nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
	if (nla_grps == NULL)
		goto nla_put_failure;

	/* single-entry list: nest type is the 1-based index */
	nest = nla_nest_start_noflag(skb, 1);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
			   grp->name))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	nla_nest_end(skb, nla_grps);

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
1351
ctrl_dumpfamily(struct sk_buff * skb,struct netlink_callback * cb)1352 static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
1353 {
1354 int n = 0;
1355 struct genl_family *rt;
1356 struct net *net = sock_net(skb->sk);
1357 int fams_to_skip = cb->args[0];
1358 unsigned int id;
1359 int err = 0;
1360
1361 idr_for_each_entry(&genl_fam_idr, rt, id) {
1362 if (!rt->netnsok && !net_eq(net, &init_net))
1363 continue;
1364
1365 if (n++ < fams_to_skip)
1366 continue;
1367
1368 err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
1369 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1370 skb, CTRL_CMD_NEWFAMILY);
1371 if (err) {
1372 n--;
1373 break;
1374 }
1375 }
1376
1377 cb->args[0] = n;
1378 return err;
1379 }
1380
/* Allocate and fill a standalone family-description message.
 *
 * Returns the skb on success (ownership passes to the caller) or an
 * ERR_PTR on allocation/fill failure.
 */
static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
					     u32 portid, int seq, u8 cmd)
{
	struct sk_buff *msg;
	int rc;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOBUFS);

	rc = ctrl_fill_info(family, portid, seq, 0, msg, cmd);
	if (rc < 0) {
		nlmsg_free(msg);
		msg = ERR_PTR(rc);
	}

	return msg;
}
1399
1400 static struct sk_buff *
ctrl_build_mcgrp_msg(const struct genl_family * family,const struct genl_multicast_group * grp,int grp_id,u32 portid,int seq,u8 cmd)1401 ctrl_build_mcgrp_msg(const struct genl_family *family,
1402 const struct genl_multicast_group *grp,
1403 int grp_id, u32 portid, int seq, u8 cmd)
1404 {
1405 struct sk_buff *skb;
1406 int err;
1407
1408 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1409 if (skb == NULL)
1410 return ERR_PTR(-ENOBUFS);
1411
1412 err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
1413 seq, 0, skb, cmd);
1414 if (err < 0) {
1415 nlmsg_free(skb);
1416 return ERR_PTR(err);
1417 }
1418
1419 return skb;
1420 }
1421
/* Attribute policy for CTRL_CMD_GETFAMILY: lookup by id or by name. */
static const struct nla_policy ctrl_policy_family[] = {
	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
				    .len = GENL_NAMSIZ - 1 },
};
1427
/* CTRL_CMD_GETFAMILY doit handler: look up a family by id or name and
 * reply with its description.
 *
 * If both attributes are present, the name lookup runs second and its
 * result wins.  A failed name lookup may trigger module autoloading.
 *
 * Returns 0 or a negative errno (-ENOENT if no such family, or if the
 * family isn't visible in the requester's netns).
 */
static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	const struct genl_family *res = NULL;
	int err = -EINVAL;

	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
		res = genl_family_find_byid(id);
		err = -ENOENT;
	}

	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
		char *name;

		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
		res = genl_family_find_byname(name);
#ifdef CONFIG_MODULES
		if (res == NULL) {
			/* Drop genl_mutex (held because nlctrl is a
			 * non-parallel family) and cb_lock so the loaded
			 * module can register its family; then re-take
			 * them in lock order and retry the lookup.
			 */
			genl_unlock();
			up_read(&cb_lock);
			request_module("net-pf-%d-proto-%d-family-%s",
				       PF_NETLINK, NETLINK_GENERIC, name);
			down_read(&cb_lock);
			genl_lock();
			res = genl_family_find_byname(name);
		}
#endif
		err = -ENOENT;
	}

	if (res == NULL)
		return err;

	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
		/* family doesn't exist here */
		return -ENOENT;
	}

	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
				    CTRL_CMD_NEWFAMILY);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	return genlmsg_reply(msg, info);
}
1474
/* Broadcast a controller notification about a family or multicast-group
 * change on the nlctrl "notify" group.
 *
 * @grp/@grp_id are only used (and required) for the MCAST_GRP events.
 * Families not flagged netnsok are announced in init_net only; others
 * are announced in every netns.
 *
 * Returns 0, or a negative errno if the event is unknown or the message
 * could not be built.
 */
static int genl_ctrl_event(int event, const struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id)
{
	struct sk_buff *msg;

	/* genl is still initialising */
	if (!init_net.genl_sock)
		return 0;

	switch (event) {
	case CTRL_CMD_NEWFAMILY:
	case CTRL_CMD_DELFAMILY:
		WARN_ON(grp);
		msg = ctrl_build_family_msg(family, 0, 0, event);
		break;
	case CTRL_CMD_NEWMCAST_GRP:
	case CTRL_CMD_DELMCAST_GRP:
		BUG_ON(!grp);
		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
		break;
	default:
		return -EINVAL;
	}

	/* the builders return ERR_PTR, never NULL */
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (!family->netnsok)
		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
					0, GFP_KERNEL);
	else
		genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);

	return 0;
}
1511
/* Per-dump state for CTRL_CMD_GETPOLICY, stored in netlink_callback->ctx
 * (size-checked against cb->ctx in ctrl_dumppolicy_start()).
 */
struct ctrl_dump_policy_ctx {
	/* accumulated set of policies being dumped */
	struct netlink_policy_dump_state *state;
	/* family whose policies are dumped */
	const struct genl_family *rt;
	/* heap-allocated op iterator; NULL in single-op mode */
	struct genl_op_iter *op_iter;
	/* requested command when single_op is set */
	u32 op;
	/* numeric family id echoed in every reply */
	u16 fam_id;
	/* dump_map: still emitting per-op policy-index attrs;
	 * single_op: CTRL_ATTR_OP was supplied, dump just that op
	 */
	u8 dump_map:1,
	   single_op:1;
};
1521
/* Attribute policy for CTRL_CMD_GETPOLICY: family selector plus an
 * optional single op to restrict the dump to.
 */
static const struct nla_policy ctrl_policy_policy[] = {
	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
				    .len = GENL_NAMSIZ - 1 },
	[CTRL_ATTR_OP]		= { .type = NLA_U32 },
};
1528
ctrl_dumppolicy_start(struct netlink_callback * cb)1529 static int ctrl_dumppolicy_start(struct netlink_callback *cb)
1530 {
1531 const struct genl_dumpit_info *info = genl_dumpit_info(cb);
1532 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1533 struct nlattr **tb = info->info.attrs;
1534 const struct genl_family *rt;
1535 struct genl_op_iter i;
1536 int err;
1537
1538 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
1539
1540 if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
1541 return -EINVAL;
1542
1543 if (tb[CTRL_ATTR_FAMILY_ID]) {
1544 ctx->fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
1545 } else {
1546 rt = genl_family_find_byname(
1547 nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
1548 if (!rt)
1549 return -ENOENT;
1550 ctx->fam_id = rt->id;
1551 }
1552
1553 rt = genl_family_find_byid(ctx->fam_id);
1554 if (!rt)
1555 return -ENOENT;
1556
1557 ctx->rt = rt;
1558
1559 if (tb[CTRL_ATTR_OP]) {
1560 struct genl_split_ops doit, dump;
1561
1562 ctx->single_op = true;
1563 ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);
1564
1565 err = genl_get_cmd_both(ctx->op, rt, &doit, &dump);
1566 if (err) {
1567 NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
1568 return err;
1569 }
1570
1571 if (doit.policy) {
1572 err = netlink_policy_dump_add_policy(&ctx->state,
1573 doit.policy,
1574 doit.maxattr);
1575 if (err)
1576 goto err_free_state;
1577 }
1578 if (dump.policy) {
1579 err = netlink_policy_dump_add_policy(&ctx->state,
1580 dump.policy,
1581 dump.maxattr);
1582 if (err)
1583 goto err_free_state;
1584 }
1585
1586 if (!ctx->state)
1587 return -ENODATA;
1588
1589 ctx->dump_map = 1;
1590 return 0;
1591 }
1592
1593 ctx->op_iter = kmalloc_obj(*ctx->op_iter);
1594 if (!ctx->op_iter)
1595 return -ENOMEM;
1596
1597 genl_op_iter_init(rt, ctx->op_iter);
1598 ctx->dump_map = genl_op_iter_next(ctx->op_iter);
1599
1600 for (genl_op_iter_init(rt, &i); genl_op_iter_next(&i); ) {
1601 if (i.doit.policy) {
1602 err = netlink_policy_dump_add_policy(&ctx->state,
1603 i.doit.policy,
1604 i.doit.maxattr);
1605 if (err)
1606 goto err_free_state;
1607 }
1608 if (i.dumpit.policy) {
1609 err = netlink_policy_dump_add_policy(&ctx->state,
1610 i.dumpit.policy,
1611 i.dumpit.maxattr);
1612 if (err)
1613 goto err_free_state;
1614 }
1615 }
1616
1617 if (!ctx->state) {
1618 err = -ENODATA;
1619 goto err_free_op_iter;
1620 }
1621 return 0;
1622
1623 err_free_state:
1624 netlink_policy_dump_free(ctx->state);
1625 err_free_op_iter:
1626 kfree(ctx->op_iter);
1627 return err;
1628 }
1629
ctrl_dumppolicy_prep(struct sk_buff * skb,struct netlink_callback * cb)1630 static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
1631 struct netlink_callback *cb)
1632 {
1633 struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1634 void *hdr;
1635
1636 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
1637 cb->nlh->nlmsg_seq, &genl_ctrl,
1638 NLM_F_MULTI, CTRL_CMD_GETPOLICY);
1639 if (!hdr)
1640 return NULL;
1641
1642 if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, ctx->fam_id))
1643 return NULL;
1644
1645 return hdr;
1646 }
1647
/* Emit one op's policy-index mapping: a CTRL_ATTR_OP_POLICY nest keyed
 * by command number, holding the do/dump policy indices into the state
 * built by ctrl_dumppolicy_start().
 *
 * Returns 0 on success (or when the op has no policy at all), -ENOBUFS
 * if the message didn't fit (partial output is cancelled).
 */
static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct genl_split_ops *doit,
				  struct genl_split_ops *dumpit)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
	struct nlattr *nest_pol, *nest_op;
	void *hdr;
	int idx;

	/* skip if we have nothing to show */
	if (!doit->policy && !dumpit->policy)
		return 0;

	hdr = ctrl_dumppolicy_prep(skb, cb);
	if (!hdr)
		return -ENOBUFS;

	nest_pol = nla_nest_start(skb, CTRL_ATTR_OP_POLICY);
	if (!nest_pol)
		goto err;

	/* doit->cmd and dumpit->cmd are the same command number here */
	nest_op = nla_nest_start(skb, doit->cmd);
	if (!nest_op)
		goto err;

	if (doit->policy) {
		idx = netlink_policy_dump_get_policy_idx(ctx->state,
							 doit->policy,
							 doit->maxattr);

		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
			goto err;
	}
	if (dumpit->policy) {
		idx = netlink_policy_dump_get_policy_idx(ctx->state,
							 dumpit->policy,
							 dumpit->maxattr);

		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
			goto err;
	}

	nla_nest_end(skb, nest_op);
	nla_nest_end(skb, nest_pol);
	genlmsg_end(skb, hdr);

	return 0;
err:
	genlmsg_cancel(skb, hdr);
	return -ENOBUFS;
}
1700
/* CTRL_CMD_GETPOLICY dumpit handler.
 *
 * Runs in two phases across (possibly many) invocations:
 *   1. while ctx->dump_map is set, emit the per-op policy-index
 *      mappings (one op in single-op mode, else iterate all ops);
 *   2. then stream the actual policies via netlink_policy_dump_loop().
 * Returning skb->len tells the netlink core to call again for the next
 * chunk; the dump ends when nothing more is written.
 */
static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
	void *hdr;

	if (ctx->dump_map) {
		if (ctx->single_op) {
			struct genl_split_ops doit, dumpit;

			/* start() already validated this op; failure here
			 * would mean it vanished mid-dump
			 */
			if (WARN_ON(genl_get_cmd_both(ctx->op, ctx->rt,
						      &doit, &dumpit)))
				return -ENOENT;

			/* out of room: resume in the next chunk */
			if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
				return skb->len;

			/* done with the per-op policy index list */
			ctx->dump_map = 0;
		}

		while (ctx->dump_map) {
			if (ctrl_dumppolicy_put_op(skb, cb,
						   &ctx->op_iter->doit,
						   &ctx->op_iter->dumpit))
				return skb->len;

			ctx->dump_map = genl_op_iter_next(ctx->op_iter);
		}
	}

	while (netlink_policy_dump_loop(ctx->state)) {
		struct nlattr *nest;

		hdr = ctrl_dumppolicy_prep(skb, cb);
		if (!hdr)
			goto nla_put_failure;

		nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
		if (!nest)
			goto nla_put_failure;

		if (netlink_policy_dump_write(skb, ctx->state))
			goto nla_put_failure;

		nla_nest_end(skb, nest);

		genlmsg_end(skb, hdr);
	}

	return skb->len;

nla_put_failure:
	/* message full: cancel the partial entry, resume next chunk */
	genlmsg_cancel(skb, hdr);
	return skb->len;
}
1756
/* Release the iterator and policy state allocated by
 * ctrl_dumppolicy_start() once the dump finishes or is aborted.
 */
static int ctrl_dumppolicy_done(struct netlink_callback *cb)
{
	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;

	/* op_iter is NULL in single-op mode; kfree(NULL) is a no-op */
	kfree(ctx->op_iter);
	netlink_policy_dump_free(ctx->state);
	return 0;
}
1765
/* Split-op table of the nlctrl controller family itself. */
static const struct genl_split_ops genl_ctrl_ops[] = {
	{
		/* GETFAMILY lookup (do path) */
		.cmd		= CTRL_CMD_GETFAMILY,
		.validate	= GENL_DONT_VALIDATE_STRICT,
		.policy		= ctrl_policy_family,
		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
		.doit		= ctrl_getfamily,
		.flags		= GENL_CMD_CAP_DO,
	},
	{
		/* GETFAMILY enumeration (dump path) */
		.cmd		= CTRL_CMD_GETFAMILY,
		.validate	= GENL_DONT_VALIDATE_DUMP,
		.policy		= ctrl_policy_family,
		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
		.dumpit		= ctrl_dumpfamily,
		.flags		= GENL_CMD_CAP_DUMP,
	},
	{
		/* GETPOLICY is dump-only */
		.cmd		= CTRL_CMD_GETPOLICY,
		.policy		= ctrl_policy_policy,
		.maxattr	= ARRAY_SIZE(ctrl_policy_policy) - 1,
		.start		= ctrl_dumppolicy_start,
		.dumpit		= ctrl_dumppolicy,
		.done		= ctrl_dumppolicy_done,
		.flags		= GENL_CMD_CAP_DUMP,
	},
};
1793
/* The controller's single multicast group, used by genl_ctrl_event(). */
static const struct genl_multicast_group genl_ctrl_groups[] = {
	{ .name = "notify", },
};
1797
/* The built-in "nlctrl" controller family (fixed id GENL_ID_CTRL).
 * Note: parallel_ops is not set, so its handlers run under genl_mutex.
 */
static struct genl_family genl_ctrl __ro_after_init = {
	.module		= THIS_MODULE,
	.split_ops	= genl_ctrl_ops,
	.n_split_ops	= ARRAY_SIZE(genl_ctrl_ops),
	.resv_start_op	= CTRL_CMD_GETPOLICY + 1,
	.mcgrps		= genl_ctrl_groups,
	.n_mcgrps	= ARRAY_SIZE(genl_ctrl_groups),
	.id		= GENL_ID_CTRL,
	.name		= "nlctrl",
	.version	= 0x2,
	.netnsok	= true,
};
1810
/* Netlink bind callback: a socket wants to join multicast @group.
 *
 * Finds the family owning the global group number and enforces the
 * group's capability flags against the netns' user namespace before
 * invoking the family's optional bind() hook.
 *
 * Returns 0 on success or if no family owns the group; -EPERM if a
 * required capability is missing.
 */
static int genl_bind(struct net *net, int group)
{
	const struct genl_family *family;
	unsigned int id;
	int ret = 0;

	/* read-side of cb_lock: no family may (un)register meanwhile */
	down_read(&cb_lock);

	idr_for_each_entry(&genl_fam_idr, family, id) {
		const struct genl_multicast_group *grp;
		int i;

		if (family->n_mcgrps == 0)
			continue;

		/* map the global group number into this family's range */
		i = group - family->mcgrp_offset;
		if (i < 0 || i >= family->n_mcgrps)
			continue;

		grp = &family->mcgrps[i];
		if ((grp->flags & GENL_MCAST_CAP_NET_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		if ((grp->flags & GENL_MCAST_CAP_SYS_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_SYS_ADMIN))
			ret = -EPERM;

		if (ret)
			break;

		if (family->bind)
			family->bind(i);

		break;
	}

	up_read(&cb_lock);
	return ret;
}
1850
genl_unbind(struct net * net,int group)1851 static void genl_unbind(struct net *net, int group)
1852 {
1853 const struct genl_family *family;
1854 unsigned int id;
1855
1856 down_read(&cb_lock);
1857
1858 idr_for_each_entry(&genl_fam_idr, family, id) {
1859 int i;
1860
1861 if (family->n_mcgrps == 0)
1862 continue;
1863
1864 i = group - family->mcgrp_offset;
1865 if (i < 0 || i >= family->n_mcgrps)
1866 continue;
1867
1868 if (family->unbind)
1869 family->unbind(i);
1870
1871 break;
1872 }
1873
1874 up_read(&cb_lock);
1875 }
1876
genl_pernet_init(struct net * net)1877 static int __net_init genl_pernet_init(struct net *net)
1878 {
1879 struct netlink_kernel_cfg cfg = {
1880 .input = genl_rcv,
1881 .flags = NL_CFG_F_NONROOT_RECV,
1882 .bind = genl_bind,
1883 .unbind = genl_unbind,
1884 .release = genl_release,
1885 };
1886
1887 /* we'll bump the group number right afterwards */
1888 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1889
1890 if (!net->genl_sock && net_eq(net, &init_net))
1891 panic("GENL: Cannot initialize generic netlink\n");
1892
1893 if (!net->genl_sock)
1894 return -ENOMEM;
1895
1896 return 0;
1897 }
1898
genl_pernet_exit(struct net * net)1899 static void __net_exit genl_pernet_exit(struct net *net)
1900 {
1901 netlink_kernel_release(net->genl_sock);
1902 net->genl_sock = NULL;
1903 }
1904
/* Hook genetlink socket setup/teardown into netns lifecycle. */
static struct pernet_operations genl_pernet_ops = {
	.init = genl_pernet_init,
	.exit = genl_pernet_exit,
};
1909
/* Boot-time initialisation: register the nlctrl controller family and
 * the per-netns socket operations.  Any failure is fatal — generic
 * netlink is core infrastructure.
 */
static int __init genl_init(void)
{
	int err;

	err = genl_register_family(&genl_ctrl);
	if (err < 0)
		goto problem;

	err = register_pernet_subsys(&genl_pernet_ops);
	if (err)
		goto problem;

	return 0;

problem:
	panic("GENL: Cannot register controller: %d\n", err);
}

/* run before device initcalls so families can register early */
core_initcall(genl_init);
1929
/* Multicast @skb on @group in every network namespace.
 *
 * Walks the netns list under RCU one step behind: each iteration sends
 * a clone to the previously visited netns, and the original skb is
 * handed (consumed) to the last netns after the loop.  -ESRCH (no
 * listeners) is not treated as an error.
 *
 * Consumes @skb on all paths.  Returns 0 if at least one netns had a
 * listener, -ESRCH if none did, or another negative errno on failure.
 */
static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
{
	struct sk_buff *tmp;
	struct net *net, *prev = NULL;
	bool delivered = false;
	int err;

	rcu_read_lock();
	for_each_net_rcu(net) {
		if (prev) {
			tmp = skb_clone(skb, GFP_ATOMIC);
			if (!tmp) {
				err = -ENOMEM;
				goto error;
			}
			/* nlmsg_multicast() consumes tmp on all paths */
			err = nlmsg_multicast(prev->genl_sock, tmp,
					      portid, group, GFP_ATOMIC);
			if (!err)
				delivered = true;
			else if (err != -ESRCH)
				goto error;
		}

		prev = net;
	}
	/* last netns gets the original skb, which this consumes */
	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);

	rcu_read_unlock();

	if (!err)
		delivered = true;
	else if (err != -ESRCH)
		return err;
	return delivered ? 0 : -ESRCH;
error:
	rcu_read_unlock();

	/* the original skb was never sent; free it here */
	kfree_skb(skb);
	return err;
}
1970
/* Multicast @skb to @family's @group (family-local index) in all
 * network namespaces.
 *
 * Returns 0 if delivered somewhere, -EINVAL for an out-of-range group,
 * or a negative errno from the multicast itself.
 */
int genlmsg_multicast_allns(const struct genl_family *family,
			    struct sk_buff *skb, u32 portid,
			    unsigned int group)
{
	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return -EINVAL;

	/* translate the family-local index into the global group id */
	return genlmsg_mcast(skb, portid, family->mcgrp_offset + group);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);
1982
/* Send a notification @skb on @family's @group (family-local index)
 * within the requester's netns, honouring NLM_F_ECHO from the original
 * request via nlmsg_report().
 */
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
		 struct genl_info *info, u32 group, gfp_t flags)
{
	struct net *net = genl_info_net(info);
	struct sock *sk = net->genl_sock;

	if (WARN_ON_ONCE(group >= family->n_mcgrps))
		return;

	/* translate the family-local index into the global group id */
	group = family->mcgrp_offset + group;
	nlmsg_notify(sk, skb, info->snd_portid, group,
		     nlmsg_report(info->nlhdr), flags);
}
EXPORT_SYMBOL(genl_notify);
1997