xref: /linux/net/netlink/genetlink.c (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NETLINK      Generic Netlink Family
4  *
5  * 		Authors:	Jamal Hadi Salim
6  * 				Thomas Graf <tgraf@suug.ch>
7  *				Johannes Berg <johannes@sipsolutions.net>
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/errno.h>
14 #include <linux/types.h>
15 #include <linux/socket.h>
16 #include <linux/string_helpers.h>
17 #include <linux/skbuff.h>
18 #include <linux/mutex.h>
19 #include <linux/bitmap.h>
20 #include <linux/rwsem.h>
21 #include <linux/idr.h>
22 #include <net/sock.h>
23 #include <net/genetlink.h>
24 
25 #include "genetlink.h"
26 
27 static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
28 static DECLARE_RWSEM(cb_lock);
29 
30 atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
31 DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
32 
33 void genl_lock(void)
34 {
35 	mutex_lock(&genl_mutex);
36 }
37 EXPORT_SYMBOL(genl_lock);
38 
39 void genl_unlock(void)
40 {
41 	mutex_unlock(&genl_mutex);
42 }
43 EXPORT_SYMBOL(genl_unlock);
44 
45 static void genl_lock_all(void)
46 {
47 	down_write(&cb_lock);
48 	genl_lock();
49 }
50 
51 static void genl_unlock_all(void)
52 {
53 	genl_unlock();
54 	up_write(&cb_lock);
55 }
56 
57 static void genl_op_lock(const struct genl_family *family)
58 {
59 	if (!family->parallel_ops)
60 		genl_lock();
61 }
62 
63 static void genl_op_unlock(const struct genl_family *family)
64 {
65 	if (!family->parallel_ops)
66 		genl_unlock();
67 }
68 
69 static DEFINE_IDR(genl_fam_idr);
70 
71 /*
72  * Bitmap of multicast groups that are currently in use.
73  *
74  * To avoid an allocation at boot of just one unsigned long,
75  * declare it global instead.
76  * Bit 0 is marked as already used since group 0 is invalid.
77  * Bit 1 is marked as already used since the drop-monitor code
78  * abuses the API and thinks it can statically use group 1.
79  * That group will typically conflict with other groups that
80  * any proper users use.
81  * Bit 16 is marked as used since it's used for generic netlink
82  * and the code no longer marks pre-reserved IDs as used.
83  * Bit 17 is marked as already used since the VFS quota code
84  * also abused this API and relied on family == group ID, we
85  * cater to that by giving it a static family and group ID.
86  * Bit 18 is marked as already used since the PMCRAID driver
87  * did the same thing as the VFS quota code (maybe copied?)
88  */
89 static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
90 				      BIT(GENL_ID_VFS_DQUOT) |
91 				      BIT(GENL_ID_PMCRAID);
92 static unsigned long *mc_groups = &mc_group_start;
93 static unsigned long mc_groups_longs = 1;
94 
95 /* We need the last attribute to have a non-zero ID, therefore a 2-entry array */
96 static struct nla_policy genl_policy_reject_all[] = {
97 	{ .type = NLA_REJECT },
98 	{ .type = NLA_REJECT },
99 };
100 
101 static int genl_ctrl_event(int event, const struct genl_family *family,
102 			   const struct genl_multicast_group *grp,
103 			   int grp_id);
104 
105 static void
106 genl_op_fill_in_reject_policy(const struct genl_family *family,
107 			      struct genl_ops *op)
108 {
109 	BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);
110 
111 	if (op->policy || op->cmd < family->resv_start_op)
112 		return;
113 
114 	op->policy = genl_policy_reject_all;
115 	op->maxattr = 1;
116 }
117 
118 static void
119 genl_op_fill_in_reject_policy_split(const struct genl_family *family,
120 				    struct genl_split_ops *op)
121 {
122 	if (op->policy)
123 		return;
124 
125 	op->policy = genl_policy_reject_all;
126 	op->maxattr = 1;
127 }
128 
129 static const struct genl_family *genl_family_find_byid(unsigned int id)
130 {
131 	return idr_find(&genl_fam_idr, id);
132 }
133 
134 static const struct genl_family *genl_family_find_byname(char *name)
135 {
136 	const struct genl_family *family;
137 	unsigned int id;
138 
139 	idr_for_each_entry(&genl_fam_idr, family, id)
140 		if (strcmp(family->name, name) == 0)
141 			return family;
142 
143 	return NULL;
144 }
145 
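/*
 * Iterator over all commands of a family, independent of whether they
 * were registered via ->ops, ->small_ops or ->split_ops.  doit/dumpit
 * hold the current command in the split representation, cmd_idx counts
 * commands, and entry_idx indexes the concatenated op arrays (a split
 * command may consume two consecutive entries, one DO and one DUMP).
 */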
146 struct genl_op_iter {
147 	const struct genl_family *family;
148 	struct genl_split_ops doit;
149 	struct genl_split_ops dumpit;
150 	int cmd_idx;
151 	int entry_idx;
152 	u32 cmd;
153 	u8 flags;
154 };
155 
156 static void genl_op_from_full(const struct genl_family *family,
157 			      unsigned int i, struct genl_ops *op)
158 {
159 	*op = family->ops[i];
160 
161 	if (!op->maxattr)
162 		op->maxattr = family->maxattr;
163 	if (!op->policy)
164 		op->policy = family->policy;
165 
166 	genl_op_fill_in_reject_policy(family, op);
167 }
168 
169 static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
170 			     struct genl_ops *op)
171 {
172 	int i;
173 
174 	for (i = 0; i < family->n_ops; i++)
175 		if (family->ops[i].cmd == cmd) {
176 			genl_op_from_full(family, i, op);
177 			return 0;
178 		}
179 
180 	return -ENOENT;
181 }
182 
183 static void genl_op_from_small(const struct genl_family *family,
184 			       unsigned int i, struct genl_ops *op)
185 {
186 	memset(op, 0, sizeof(*op));
187 	op->doit	= family->small_ops[i].doit;
188 	op->dumpit	= family->small_ops[i].dumpit;
189 	op->cmd		= family->small_ops[i].cmd;
190 	op->internal_flags = family->small_ops[i].internal_flags;
191 	op->flags	= family->small_ops[i].flags;
192 	op->validate	= family->small_ops[i].validate;
193 
194 	op->maxattr = family->maxattr;
195 	op->policy = family->policy;
196 
197 	genl_op_fill_in_reject_policy(family, op);
198 }
199 
200 static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
201 			      struct genl_ops *op)
202 {
203 	int i;
204 
205 	for (i = 0; i < family->n_small_ops; i++)
206 		if (family->small_ops[i].cmd == cmd) {
207 			genl_op_from_small(family, i, op);
208 			return 0;
209 		}
210 
211 	return -ENOENT;
212 }
213 
214 static void genl_op_from_split(struct genl_op_iter *iter)
215 {
216 	const struct genl_family *family = iter->family;
217 	int i, cnt = 0;
218 
219 	i = iter->entry_idx - family->n_ops - family->n_small_ops;
220 
221 	if (family->split_ops[i + cnt].flags & GENL_CMD_CAP_DO) {
222 		iter->doit = family->split_ops[i + cnt];
223 		genl_op_fill_in_reject_policy_split(family, &iter->doit);
224 		cnt++;
225 	} else {
226 		memset(&iter->doit, 0, sizeof(iter->doit));
227 	}
228 
229 	if (i + cnt < family->n_split_ops &&
230 	    family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP &&
231 	    (!cnt || family->split_ops[i + cnt].cmd == iter->doit.cmd)) {
232 		iter->dumpit = family->split_ops[i + cnt];
233 		genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
234 		cnt++;
235 	} else {
236 		memset(&iter->dumpit, 0, sizeof(iter->dumpit));
237 	}
238 
239 	WARN_ON(!cnt);
240 	iter->entry_idx += cnt;
241 }
242 
243 static int
244 genl_get_cmd_split(u32 cmd, u8 flag, const struct genl_family *family,
245 		   struct genl_split_ops *op)
246 {
247 	int i;
248 
249 	for (i = 0; i < family->n_split_ops; i++)
250 		if (family->split_ops[i].cmd == cmd &&
251 		    family->split_ops[i].flags & flag) {
252 			*op = family->split_ops[i];
253 			return 0;
254 		}
255 
256 	return -ENOENT;
257 }
258 
259 static int
260 genl_cmd_full_to_split(struct genl_split_ops *op,
261 		       const struct genl_family *family,
262 		       const struct genl_ops *full, u8 flags)
263 {
264 	if ((flags & GENL_CMD_CAP_DO && !full->doit) ||
265 	    (flags & GENL_CMD_CAP_DUMP && !full->dumpit)) {
266 		memset(op, 0, sizeof(*op));
267 		return -ENOENT;
268 	}
269 
270 	if (flags & GENL_CMD_CAP_DUMP) {
271 		op->start	= full->start;
272 		op->dumpit	= full->dumpit;
273 		op->done	= full->done;
274 	} else {
275 		op->pre_doit	= family->pre_doit;
276 		op->doit	= full->doit;
277 		op->post_doit	= family->post_doit;
278 	}
279 
280 	if (flags & GENL_CMD_CAP_DUMP &&
281 	    full->validate & GENL_DONT_VALIDATE_DUMP) {
282 		op->policy	= NULL;
283 		op->maxattr	= 0;
284 	} else {
285 		op->policy	= full->policy;
286 		op->maxattr	= full->maxattr;
287 	}
288 
289 	op->cmd			= full->cmd;
290 	op->internal_flags	= full->internal_flags;
291 	op->flags		= full->flags;
292 	op->validate		= full->validate;
293 
294 	/* Make sure flags include GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP */
295 	op->flags		|= flags;
296 
297 	return 0;
298 }
299 
300 /* Must make sure that op is initialized to 0 on failure */
301 static int
302 genl_get_cmd(u32 cmd, u8 flags, const struct genl_family *family,
303 	     struct genl_split_ops *op)
304 {
305 	struct genl_ops full;
306 	int err;
307 
308 	err = genl_get_cmd_full(cmd, family, &full);
309 	if (err == -ENOENT)
310 		err = genl_get_cmd_small(cmd, family, &full);
311 	/* Found one of the legacy forms */
312 	if (err == 0)
313 		return genl_cmd_full_to_split(op, family, &full, flags);
314 
315 	err = genl_get_cmd_split(cmd, flags, family, op);
316 	if (err)
317 		memset(op, 0, sizeof(*op));
318 	return err;
319 }
320 
321 /* For policy dumping only, get both the do and the dump ops.
322  * Fail only if both are missing; genl_get_cmd() zero-inits the op on failure.
323  */
324 static int
325 genl_get_cmd_both(u32 cmd, const struct genl_family *family,
326 		  struct genl_split_ops *doit, struct genl_split_ops *dumpit)
327 {
328 	int err1, err2;
329 
330 	err1 = genl_get_cmd(cmd, GENL_CMD_CAP_DO, family, doit);
331 	err2 = genl_get_cmd(cmd, GENL_CMD_CAP_DUMP, family, dumpit);
332 
333 	return err1 && err2 ? -ENOENT : 0;
334 }
335 
336 static bool
337 genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
338 {
339 	iter->family = family;
340 	iter->cmd_idx = 0;
341 	iter->entry_idx = 0;
342 
343 	iter->flags = 0;
344 
345 	return iter->family->n_ops +
346 		iter->family->n_small_ops +
347 		iter->family->n_split_ops;
348 }
349 
350 static bool genl_op_iter_next(struct genl_op_iter *iter)
351 {
352 	const struct genl_family *family = iter->family;
353 	bool legacy_op = true;
354 	struct genl_ops op;
355 
356 	if (iter->entry_idx < family->n_ops) {
357 		genl_op_from_full(family, iter->entry_idx, &op);
358 	} else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
359 		genl_op_from_small(family, iter->entry_idx - family->n_ops,
360 				   &op);
361 	} else if (iter->entry_idx <
362 		   family->n_ops + family->n_small_ops + family->n_split_ops) {
363 		legacy_op = false;
364 		/* updates entry_idx */
365 		genl_op_from_split(iter);
366 	} else {
367 		return false;
368 	}
369 
370 	iter->cmd_idx++;
371 
372 	if (legacy_op) {
373 		iter->entry_idx++;
374 
375 		genl_cmd_full_to_split(&iter->doit, family,
376 				       &op, GENL_CMD_CAP_DO);
377 		genl_cmd_full_to_split(&iter->dumpit, family,
378 				       &op, GENL_CMD_CAP_DUMP);
379 	}
380 
381 	iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
382 	iter->flags = iter->doit.flags | iter->dumpit.flags;
383 
384 	return true;
385 }
386 
387 static void
388 genl_op_iter_copy(struct genl_op_iter *dst, struct genl_op_iter *src)
389 {
390 	*dst = *src;
391 }
392 
393 static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
394 {
395 	return iter->cmd_idx;
396 }
397 
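/*
 * Find, and mark as used, a run of n_groups consecutive free bits in
 * mc_groups, growing the bitmap when it is too small.  For example,
 * with only the boot-time reservations above in place (bits 0, 1, 16,
 * 17 and 18 taken), a family asking for three groups is assigned
 * IDs 2-4.
 */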
398 static int genl_allocate_reserve_groups(int n_groups, int *first_id)
399 {
400 	unsigned long *new_groups;
401 	int start = 0;
402 	int i;
403 	int id;
404 	bool fits;
405 
406 	do {
407 		if (start == 0)
408 			id = find_first_zero_bit(mc_groups,
409 						 mc_groups_longs *
410 						 BITS_PER_LONG);
411 		else
412 			id = find_next_zero_bit(mc_groups,
413 						mc_groups_longs * BITS_PER_LONG,
414 						start);
415 
416 		fits = true;
417 		for (i = id;
418 		     i < min_t(int, id + n_groups,
419 			       mc_groups_longs * BITS_PER_LONG);
420 		     i++) {
421 			if (test_bit(i, mc_groups)) {
422 				start = i;
423 				fits = false;
424 				break;
425 			}
426 		}
427 
428 		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
429 			unsigned long new_longs = mc_groups_longs +
430 						  BITS_TO_LONGS(n_groups);
431 			size_t nlen = new_longs * sizeof(unsigned long);
432 
433 			if (mc_groups == &mc_group_start) {
434 				new_groups = kzalloc(nlen, GFP_KERNEL);
435 				if (!new_groups)
436 					return -ENOMEM;
437 				mc_groups = new_groups;
438 				*mc_groups = mc_group_start;
439 			} else {
440 				new_groups = krealloc(mc_groups, nlen,
441 						      GFP_KERNEL);
442 				if (!new_groups)
443 					return -ENOMEM;
444 				mc_groups = new_groups;
445 				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
446 					mc_groups[mc_groups_longs + i] = 0;
447 			}
448 			mc_groups_longs = new_longs;
449 		}
450 	} while (!fits);
451 
452 	for (i = id; i < id + n_groups; i++)
453 		set_bit(i, mc_groups);
454 	*first_id = id;
455 	return 0;
456 }
457 
458 static struct genl_family genl_ctrl;
459 
460 static int genl_validate_assign_mc_groups(struct genl_family *family)
461 {
462 	int first_id;
463 	int n_groups = family->n_mcgrps;
464 	int err = 0, i;
465 	bool groups_allocated = false;
466 
467 	if (!n_groups)
468 		return 0;
469 
470 	for (i = 0; i < n_groups; i++) {
471 		const struct genl_multicast_group *grp = &family->mcgrps[i];
472 
473 		if (WARN_ON(grp->name[0] == '\0'))
474 			return -EINVAL;
475 		if (WARN_ON(!string_is_terminated(grp->name, GENL_NAMSIZ)))
476 			return -EINVAL;
477 	}
478 
479 	/* special-case our own group and hacks */
480 	if (family == &genl_ctrl) {
481 		first_id = GENL_ID_CTRL;
482 		BUG_ON(n_groups != 1);
483 	} else if (strcmp(family->name, "NET_DM") == 0) {
484 		first_id = 1;
485 		BUG_ON(n_groups != 1);
486 	} else if (family->id == GENL_ID_VFS_DQUOT) {
487 		first_id = GENL_ID_VFS_DQUOT;
488 		BUG_ON(n_groups != 1);
489 	} else if (family->id == GENL_ID_PMCRAID) {
490 		first_id = GENL_ID_PMCRAID;
491 		BUG_ON(n_groups != 1);
492 	} else {
493 		groups_allocated = true;
494 		err = genl_allocate_reserve_groups(n_groups, &first_id);
495 		if (err)
496 			return err;
497 	}
498 
499 	family->mcgrp_offset = first_id;
500 
501 	/* if still initializing, can't and don't need to realloc bitmaps */
502 	if (!init_net.genl_sock)
503 		return 0;
504 
505 	if (family->netnsok) {
506 		struct net *net;
507 
508 		netlink_table_grab();
509 		rcu_read_lock();
510 		for_each_net_rcu(net) {
511 			err = __netlink_change_ngroups(net->genl_sock,
512 					mc_groups_longs * BITS_PER_LONG);
513 			if (err) {
514 				/*
515 				 * No need to roll back; this can only fail
516 				 * when a memory allocation fails, and then
517 				 * the number of _possible_ groups has merely
518 				 * been increased on some sockets, which is ok.
519 				 */
520 				break;
521 			}
522 		}
523 		rcu_read_unlock();
524 		netlink_table_ungrab();
525 	} else {
526 		err = netlink_change_ngroups(init_net.genl_sock,
527 					     mc_groups_longs * BITS_PER_LONG);
528 	}
529 
530 	if (groups_allocated && err) {
531 		for (i = 0; i < family->n_mcgrps; i++)
532 			clear_bit(family->mcgrp_offset + i, mc_groups);
533 	}
534 
535 	return err;
536 }
537 
538 static void genl_unregister_mc_groups(const struct genl_family *family)
539 {
540 	struct net *net;
541 	int i;
542 
543 	netlink_table_grab();
544 	rcu_read_lock();
545 	for_each_net_rcu(net) {
546 		for (i = 0; i < family->n_mcgrps; i++)
547 			__netlink_clear_multicast_users(
548 				net->genl_sock, family->mcgrp_offset + i);
549 	}
550 	rcu_read_unlock();
551 	netlink_table_ungrab();
552 
553 	for (i = 0; i < family->n_mcgrps; i++) {
554 		int grp_id = family->mcgrp_offset + i;
555 
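		/* bit 1 stays reserved for drop-monitor, see mc_group_start */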
556 		if (grp_id != 1)
557 			clear_bit(grp_id, mc_groups);
558 		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
559 				&family->mcgrps[i], grp_id);
560 	}
561 }
562 
563 static bool genl_split_op_check(const struct genl_split_ops *op)
564 {
565 	if (WARN_ON(hweight8(op->flags & (GENL_CMD_CAP_DO |
566 					  GENL_CMD_CAP_DUMP)) != 1))
567 		return true;
568 	return false;
569 }
570 
571 static int genl_validate_ops(const struct genl_family *family)
572 {
573 	struct genl_op_iter i, j;
574 	unsigned int s;
575 
576 	if (WARN_ON(family->n_ops && !family->ops) ||
577 	    WARN_ON(family->n_small_ops && !family->small_ops) ||
578 	    WARN_ON(family->n_split_ops && !family->split_ops))
579 		return -EINVAL;
580 
581 	for (genl_op_iter_init(family, &i); genl_op_iter_next(&i); ) {
582 		if (!(i.flags & (GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP)))
583 			return -EINVAL;
584 
585 		if (WARN_ON(i.cmd >= family->resv_start_op &&
586 			    (i.doit.validate || i.dumpit.validate)))
587 			return -EINVAL;
588 
589 		genl_op_iter_copy(&j, &i);
590 		while (genl_op_iter_next(&j)) {
591 			if (i.cmd == j.cmd)
592 				return -EINVAL;
593 		}
594 	}
595 
596 	if (family->n_split_ops) {
597 		if (genl_split_op_check(&family->split_ops[0]))
598 			return -EINVAL;
599 	}
600 
601 	for (s = 1; s < family->n_split_ops; s++) {
602 		const struct genl_split_ops *a, *b;
603 
604 		a = &family->split_ops[s - 1];
605 		b = &family->split_ops[s];
606 
607 		if (genl_split_op_check(b))
608 			return -EINVAL;
609 
610 		/* Check sort order */
611 		if (a->cmd < b->cmd) {
612 			continue;
613 		} else if (a->cmd > b->cmd) {
614 			WARN_ON(1);
615 			return -EINVAL;
616 		}
617 
618 		if (a->internal_flags != b->internal_flags ||
619 		    ((a->flags ^ b->flags) & ~(GENL_CMD_CAP_DO |
620 					       GENL_CMD_CAP_DUMP))) {
621 			WARN_ON(1);
622 			return -EINVAL;
623 		}
624 
625 		if ((a->flags & GENL_CMD_CAP_DO) &&
626 		    (b->flags & GENL_CMD_CAP_DUMP))
627 			continue;
628 
629 		WARN_ON(1);
630 		return -EINVAL;
631 	}
632 
633 	return 0;
634 }
635 
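/*
 * For illustration, a ->split_ops layout that passes the checks in
 * genl_validate_ops() above (hypothetical command numbers and handler
 * names): entries are sorted by cmd, each entry has exactly one of
 * GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP set, and the DO entry of a
 * command sits directly before its DUMP entry.
 *
 *	static const struct genl_split_ops example_split_ops[] = {
 *		{
 *			.cmd	= EXAMPLE_CMD_GET,
 *			.doit	= example_get_doit,
 *			.flags	= GENL_CMD_CAP_DO,
 *		},
 *		{
 *			.cmd	= EXAMPLE_CMD_GET,
 *			.dumpit	= example_get_dumpit,
 *			.flags	= GENL_CMD_CAP_DUMP,
 *		},
 *		{
 *			.cmd	= EXAMPLE_CMD_SET,
 *			.doit	= example_set_doit,
 *			.flags	= GENL_CMD_CAP_DO,
 *		},
 *	};
 *
 * genl_ctrl_ops near the bottom of this file follows the same pattern.
 */
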
636 static void *genl_sk_priv_alloc(struct genl_family *family)
637 {
638 	void *priv;
639 
640 	priv = kzalloc(family->sock_priv_size, GFP_KERNEL);
641 	if (!priv)
642 		return ERR_PTR(-ENOMEM);
643 
644 	if (family->sock_priv_init)
645 		family->sock_priv_init(priv);
646 
647 	return priv;
648 }
649 
650 static void genl_sk_priv_free(const struct genl_family *family, void *priv)
651 {
652 	if (family->sock_priv_destroy)
653 		family->sock_priv_destroy(priv);
654 	kfree(priv);
655 }
656 
657 static int genl_sk_privs_alloc(struct genl_family *family)
658 {
659 	if (!family->sock_priv_size)
660 		return 0;
661 
662 	family->sock_privs = kzalloc(sizeof(*family->sock_privs), GFP_KERNEL);
663 	if (!family->sock_privs)
664 		return -ENOMEM;
665 	xa_init(family->sock_privs);
666 	return 0;
667 }
668 
669 static void genl_sk_privs_free(const struct genl_family *family)
670 {
671 	unsigned long id;
672 	void *priv;
673 
674 	if (!family->sock_priv_size)
675 		return;
676 
677 	xa_for_each(family->sock_privs, id, priv)
678 		genl_sk_priv_free(family, priv);
679 
680 	xa_destroy(family->sock_privs);
681 	kfree(family->sock_privs);
682 }
683 
684 static void genl_sk_priv_free_by_sock(struct genl_family *family,
685 				      struct sock *sk)
686 {
687 	void *priv;
688 
689 	if (!family->sock_priv_size)
690 		return;
691 	priv = xa_erase(family->sock_privs, (unsigned long) sk);
692 	if (!priv)
693 		return;
694 	genl_sk_priv_free(family, priv);
695 }
696 
697 static void genl_release(struct sock *sk, unsigned long *groups)
698 {
699 	struct genl_family *family;
700 	unsigned int id;
701 
702 	down_read(&cb_lock);
703 
704 	idr_for_each_entry(&genl_fam_idr, family, id)
705 		genl_sk_priv_free_by_sock(family, sk);
706 
707 	up_read(&cb_lock);
708 }
709 
710 /**
711  * __genl_sk_priv_get - Get family private pointer for socket, if exists
712  *
713  * @family: family
714  * @sk: socket
715  *
716  * Look up the per-socket private memory of a generic netlink family.
717  *
718  * The caller must call this from an RCU read-side critical section.
719  *
720  * Return: a valid pointer on success, a negative error value encoded by
721  * ERR_PTR() on error, or NULL if no priv exists for this socket.
722  */
723 void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk)
724 {
725 	if (WARN_ON_ONCE(!family->sock_privs))
726 		return ERR_PTR(-EINVAL);
727 	return xa_load(family->sock_privs, (unsigned long) sk);
728 }
729 
730 /**
731  * genl_sk_priv_get - Get family private pointer for socket
732  *
733  * @family: family
734  * @sk: socket
735  *
736  * Look up the per-socket private memory of a generic netlink family,
737  * allocating it first if it does not exist yet.
738  *
739  * Return: a valid pointer on success, otherwise a negative error value
740  * encoded by ERR_PTR().
741  */
742 void *genl_sk_priv_get(struct genl_family *family, struct sock *sk)
743 {
744 	void *priv, *old_priv;
745 
746 	priv = __genl_sk_priv_get(family, sk);
747 	if (priv)
748 		return priv;
749 
750 	/* priv for the family does not exist so far, create it. */
751 
752 	priv = genl_sk_priv_alloc(family);
753 	if (IS_ERR(priv))
754 		return ERR_CAST(priv);
755 
756 	old_priv = xa_cmpxchg(family->sock_privs, (unsigned long) sk, NULL,
757 			      priv, GFP_KERNEL);
758 	if (old_priv) {
759 		genl_sk_priv_free(family, priv);
760 		if (xa_is_err(old_priv))
761 			return ERR_PTR(xa_err(old_priv));
762 		/* Race happened, priv for the socket was already inserted. */
763 		return old_priv;
764 	}
765 	return priv;
766 }
767 
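/*
 * Usage sketch (illustrative only, hence __maybe_unused; a real handler
 * would reference its own global family object instead of taking it as
 * a parameter, and use its real per-socket type rather than u32): a
 * ->doit() handler looking up, and allocating on first use, the
 * per-socket state of a family that set ->sock_priv_size.
 */
static int __maybe_unused
genl_example_sock_priv_doit(struct genl_family *family, struct sk_buff *skb,
			    struct genl_info *info)
{
	u32 *flags;

	flags = genl_sk_priv_get(family, NETLINK_CB(skb).sk);
	if (IS_ERR(flags))
		return PTR_ERR(flags);

	*flags |= 1; /* hypothetical per-socket flag bit */
	return 0;
}
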
768 /**
769  * genl_register_family - register a generic netlink family
770  * @family: generic netlink family
771  *
772  * Registers the specified family after validating it. Only one family
773  * may be registered with a given name or identifier.
774  *
775  * The family's ops, multicast groups and module pointer must already
776  * be assigned.
777  *
778  * Returns 0 on success or a negative error code.
779  */
780 int genl_register_family(struct genl_family *family)
781 {
782 	int err, i;
783 	int start = GENL_START_ALLOC, end = GENL_MAX_ID;
784 
785 	err = genl_validate_ops(family);
786 	if (err)
787 		return err;
788 
789 	genl_lock_all();
790 
791 	if (genl_family_find_byname(family->name)) {
792 		err = -EEXIST;
793 		goto errout_locked;
794 	}
795 
796 	err = genl_sk_privs_alloc(family);
797 	if (err)
798 		goto errout_locked;
799 
800 	/*
801 	 * Sadly, a few families need to be special-cased
802 	 * because they historically abused the API and
803 	 * used their family ID also as their multicast
804 	 * group ID, so we use reserved IDs for both to
805 	 * be sure that mapping keeps working.
806 	 */
807 	if (family == &genl_ctrl) {
808 		/* and this needs to be special for initial family lookups */
809 		start = end = GENL_ID_CTRL;
810 	} else if (strcmp(family->name, "pmcraid") == 0) {
811 		start = end = GENL_ID_PMCRAID;
812 	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
813 		start = end = GENL_ID_VFS_DQUOT;
814 	}
815 
816 	family->id = idr_alloc_cyclic(&genl_fam_idr, family,
817 				      start, end + 1, GFP_KERNEL);
818 	if (family->id < 0) {
819 		err = family->id;
820 		goto errout_sk_privs_free;
821 	}
822 
823 	err = genl_validate_assign_mc_groups(family);
824 	if (err)
825 		goto errout_remove;
826 
827 	genl_unlock_all();
828 
829 	/* send all events */
830 	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
831 	for (i = 0; i < family->n_mcgrps; i++)
832 		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
833 				&family->mcgrps[i], family->mcgrp_offset + i);
834 
835 	return 0;
836 
837 errout_remove:
838 	idr_remove(&genl_fam_idr, family->id);
839 errout_sk_privs_free:
840 	genl_sk_privs_free(family);
841 errout_locked:
842 	genl_unlock_all();
843 	return err;
844 }
845 EXPORT_SYMBOL(genl_register_family);
846 
847 /**
848  * genl_unregister_family - unregister generic netlink family
849  * @family: generic netlink family
850  *
851  * Unregisters the specified family.
852  *
853  * Returns 0 on success or a negative error code.
854  */
855 int genl_unregister_family(const struct genl_family *family)
856 {
857 	genl_lock_all();
858 
859 	if (!genl_family_find_byid(family->id)) {
860 		genl_unlock_all();
861 		return -ENOENT;
862 	}
863 
864 	genl_unregister_mc_groups(family);
865 
866 	idr_remove(&genl_fam_idr, family->id);
867 
868 	up_write(&cb_lock);
869 	wait_event(genl_sk_destructing_waitq,
870 		   atomic_read(&genl_sk_destructing_cnt) == 0);
871 
872 	genl_sk_privs_free(family);
873 
874 	genl_unlock();
875 
876 	genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
877 
878 	return 0;
879 }
880 EXPORT_SYMBOL(genl_unregister_family);
881 
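/*
 * Registration sketch for a minimal, hypothetical "genl_example" family
 * (the names, command and attribute constants below are made up for
 * illustration): define a policy, the ops and the family, register it
 * from module init and unregister it from module exit.
 *
 *	static const struct nla_policy genl_example_policy[] = {
 *		[GENL_EXAMPLE_ATTR_VALUE] = { .type = NLA_U32 },
 *	};
 *
 *	static const struct genl_ops genl_example_ops[] = {
 *		{
 *			.cmd	= GENL_EXAMPLE_CMD_GET,
 *			.doit	= genl_example_get_doit,
 *		},
 *	};
 *
 *	static struct genl_family genl_example_family = {
 *		.name		= "genl_example",
 *		.version	= 1,
 *		.maxattr	= GENL_EXAMPLE_ATTR_MAX,
 *		.policy		= genl_example_policy,
 *		.module		= THIS_MODULE,
 *		.ops		= genl_example_ops,
 *		.n_ops		= ARRAY_SIZE(genl_example_ops),
 *		.resv_start_op	= GENL_EXAMPLE_CMD_GET + 1,
 *	};
 *
 * Module init then calls genl_register_family(&genl_example_family) and
 * module exit calls genl_unregister_family(&genl_example_family).
 */
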
882 /**
883  * genlmsg_put - Add generic netlink header to netlink message
884  * @skb: socket buffer holding the message
885  * @portid: netlink portid the message is addressed to
886  * @seq: sequence number (usually the one of the sender)
887  * @family: generic netlink family
888  * @flags: netlink message flags
889  * @cmd: generic netlink command
890  *
891  * Returns a pointer to the user-specific header
892  */
893 void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
894 		  const struct genl_family *family, int flags, u8 cmd)
895 {
896 	struct nlmsghdr *nlh;
897 	struct genlmsghdr *hdr;
898 
899 	nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
900 			family->hdrsize, flags);
901 	if (nlh == NULL)
902 		return NULL;
903 
904 	hdr = nlmsg_data(nlh);
905 	hdr->cmd = cmd;
906 	hdr->version = family->version;
907 	hdr->reserved = 0;
908 
909 	return (char *) hdr + GENL_HDRLEN;
910 }
911 EXPORT_SYMBOL(genlmsg_put);
912 
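/*
 * Usage sketch (illustrative only, hence __maybe_unused): the typical
 * shape of a unicast reply built around genlmsg_put().  The command and
 * attribute type are parameters here; a real ->doit() handler would use
 * its family's own constants.
 */
static int __maybe_unused
genl_example_reply(struct genl_info *info, u8 cmd, int attrtype, u32 value)
{
	struct sk_buff *msg;
	void *hdr;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* netlink + genl + family headers, echoing the request's addressing */
	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  info->family, 0, cmd);
	if (!hdr)
		goto err_free;

	if (nla_put_u32(msg, attrtype, value))
		goto err_cancel;

	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);

err_cancel:
	genlmsg_cancel(msg, hdr);
err_free:
	nlmsg_free(msg);
	return -EMSGSIZE;
}
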
913 static struct genl_dumpit_info *genl_dumpit_info_alloc(void)
914 {
915 	return kmalloc(sizeof(struct genl_dumpit_info), GFP_KERNEL);
916 }
917 
918 static void genl_dumpit_info_free(const struct genl_dumpit_info *info)
919 {
920 	kfree(info);
921 }
922 
923 static struct nlattr **
924 genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
925 				struct nlmsghdr *nlh,
926 				struct netlink_ext_ack *extack,
927 				const struct genl_split_ops *ops,
928 				int hdrlen,
929 				enum genl_validate_flags no_strict_flag)
930 {
931 	enum netlink_validation validate = ops->validate & no_strict_flag ?
932 					   NL_VALIDATE_LIBERAL :
933 					   NL_VALIDATE_STRICT;
934 	struct nlattr **attrbuf;
935 	int err;
936 
937 	if (!ops->maxattr)
938 		return NULL;
939 
940 	attrbuf = kmalloc_array(ops->maxattr + 1,
941 				sizeof(struct nlattr *), GFP_KERNEL);
942 	if (!attrbuf)
943 		return ERR_PTR(-ENOMEM);
944 
945 	err = __nlmsg_parse(nlh, hdrlen, attrbuf, ops->maxattr, ops->policy,
946 			    validate, extack);
947 	if (err) {
948 		kfree(attrbuf);
949 		return ERR_PTR(err);
950 	}
951 	return attrbuf;
952 }
953 
954 static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
955 {
956 	kfree(attrbuf);
957 }
958 
959 struct genl_start_context {
960 	const struct genl_family *family;
961 	struct nlmsghdr *nlh;
962 	struct netlink_ext_ack *extack;
963 	const struct genl_split_ops *ops;
964 	int hdrlen;
965 };
966 
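/*
 * Dump state lifetime: genl_start() parses the request attributes and
 * allocates the struct genl_dumpit_info kept in cb->data (the family's
 * handlers retrieve it via genl_dumpit_info(cb)), genl_dumpit() invokes
 * the family's ->dumpit() for each pass, and genl_done() frees the
 * parsed attributes and the info once the dump completes.
 */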
967 static int genl_start(struct netlink_callback *cb)
968 {
969 	struct genl_start_context *ctx = cb->data;
970 	const struct genl_split_ops *ops;
971 	struct genl_dumpit_info *info;
972 	struct nlattr **attrs = NULL;
973 	int rc = 0;
974 
975 	ops = ctx->ops;
976 	if (!(ops->validate & GENL_DONT_VALIDATE_DUMP) &&
977 	    ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
978 		return -EINVAL;
979 
980 	attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
981 						ops, ctx->hdrlen,
982 						GENL_DONT_VALIDATE_DUMP_STRICT);
983 	if (IS_ERR(attrs))
984 		return PTR_ERR(attrs);
985 
986 	info = genl_dumpit_info_alloc();
987 	if (!info) {
988 		genl_family_rcv_msg_attrs_free(attrs);
989 		return -ENOMEM;
990 	}
991 	info->op = *ops;
992 	info->info.family	= ctx->family;
993 	info->info.snd_seq	= cb->nlh->nlmsg_seq;
994 	info->info.snd_portid	= NETLINK_CB(cb->skb).portid;
995 	info->info.nlhdr	= cb->nlh;
996 	info->info.genlhdr	= nlmsg_data(cb->nlh);
997 	info->info.attrs	= attrs;
998 	genl_info_net_set(&info->info, sock_net(cb->skb->sk));
999 	info->info.extack	= cb->extack;
1000 	memset(&info->info.user_ptr, 0, sizeof(info->info.user_ptr));
1001 
1002 	cb->data = info;
1003 	if (ops->start) {
1004 		genl_op_lock(ctx->family);
1005 		rc = ops->start(cb);
1006 		genl_op_unlock(ctx->family);
1007 	}
1008 
1009 	if (rc) {
1010 		genl_family_rcv_msg_attrs_free(info->info.attrs);
1011 		genl_dumpit_info_free(info);
1012 		cb->data = NULL;
1013 	}
1014 	return rc;
1015 }
1016 
1017 static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1018 {
1019 	struct genl_dumpit_info *dump_info = cb->data;
1020 	const struct genl_split_ops *ops = &dump_info->op;
1021 	struct genl_info *info = &dump_info->info;
1022 	int rc;
1023 
1024 	info->extack = cb->extack;
1025 
1026 	genl_op_lock(info->family);
1027 	rc = ops->dumpit(skb, cb);
1028 	genl_op_unlock(info->family);
1029 	return rc;
1030 }
1031 
1032 static int genl_done(struct netlink_callback *cb)
1033 {
1034 	struct genl_dumpit_info *dump_info = cb->data;
1035 	const struct genl_split_ops *ops = &dump_info->op;
1036 	struct genl_info *info = &dump_info->info;
1037 	int rc = 0;
1038 
1039 	info->extack = cb->extack;
1040 
1041 	if (ops->done) {
1042 		genl_op_lock(info->family);
1043 		rc = ops->done(cb);
1044 		genl_op_unlock(info->family);
1045 	}
1046 	genl_family_rcv_msg_attrs_free(info->attrs);
1047 	genl_dumpit_info_free(dump_info);
1048 	return rc;
1049 }
1050 
1051 static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
1052 				      struct sk_buff *skb,
1053 				      struct nlmsghdr *nlh,
1054 				      struct netlink_ext_ack *extack,
1055 				      const struct genl_split_ops *ops,
1056 				      int hdrlen, struct net *net)
1057 {
1058 	struct genl_start_context ctx;
1059 	struct netlink_dump_control c = {
1060 		.module = family->module,
1061 		.data = &ctx,
1062 		.start = genl_start,
1063 		.dump = genl_dumpit,
1064 		.done = genl_done,
1065 		.extack = extack,
1066 	};
1067 	int err;
1068 
1069 	ctx.family = family;
1070 	ctx.nlh = nlh;
1071 	ctx.extack = extack;
1072 	ctx.ops = ops;
1073 	ctx.hdrlen = hdrlen;
1074 
1075 	genl_op_unlock(family);
1076 	err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
1077 	genl_op_lock(family);
1078 
1079 	return err;
1080 }
1081 
1082 static int genl_family_rcv_msg_doit(const struct genl_family *family,
1083 				    struct sk_buff *skb,
1084 				    struct nlmsghdr *nlh,
1085 				    struct netlink_ext_ack *extack,
1086 				    const struct genl_split_ops *ops,
1087 				    int hdrlen, struct net *net)
1088 {
1089 	struct nlattr **attrbuf;
1090 	struct genl_info info;
1091 	int err;
1092 
1093 	attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
1094 						  ops, hdrlen,
1095 						  GENL_DONT_VALIDATE_STRICT);
1096 	if (IS_ERR(attrbuf))
1097 		return PTR_ERR(attrbuf);
1098 
1099 	info.snd_seq = nlh->nlmsg_seq;
1100 	info.snd_portid = NETLINK_CB(skb).portid;
1101 	info.family = family;
1102 	info.nlhdr = nlh;
1103 	info.genlhdr = nlmsg_data(nlh);
1104 	info.attrs = attrbuf;
1105 	info.extack = extack;
1106 	genl_info_net_set(&info, net);
1107 	memset(&info.user_ptr, 0, sizeof(info.user_ptr));
1108 
1109 	if (ops->pre_doit) {
1110 		err = ops->pre_doit(ops, skb, &info);
1111 		if (err)
1112 			goto out;
1113 	}
1114 
1115 	err = ops->doit(skb, &info);
1116 
1117 	if (ops->post_doit)
1118 		ops->post_doit(ops, skb, &info);
1119 
1120 out:
1121 	genl_family_rcv_msg_attrs_free(attrbuf);
1122 
1123 	return err;
1124 }
1125 
1126 static int genl_header_check(const struct genl_family *family,
1127 			     struct nlmsghdr *nlh, struct genlmsghdr *hdr,
1128 			     struct netlink_ext_ack *extack)
1129 {
1130 	u16 flags;
1131 
1132 	/* Only for commands added after we started validating */
1133 	if (hdr->cmd < family->resv_start_op)
1134 		return 0;
1135 
1136 	if (hdr->reserved) {
1137 		NL_SET_ERR_MSG(extack, "genlmsghdr.reserved field is not 0");
1138 		return -EINVAL;
1139 	}
1140 
1141 	/* Old netlink flags have pretty loose semantics; allow only the flags
1142 	 * consumed by the core, where we can enforce the meaning.
1143 	 */
1144 	flags = nlh->nlmsg_flags;
1145 	if ((flags & NLM_F_DUMP) == NLM_F_DUMP) /* DUMP is 2 bits */
1146 		flags &= ~NLM_F_DUMP;
1147 	if (flags & ~(NLM_F_REQUEST | NLM_F_ACK | NLM_F_ECHO)) {
1148 		NL_SET_ERR_MSG(extack,
1149 			       "ambiguous or reserved bits set in nlmsg_flags");
1150 		return -EINVAL;
1151 	}
1152 
1153 	return 0;
1154 }
1155 
1156 static int genl_family_rcv_msg(const struct genl_family *family,
1157 			       struct sk_buff *skb,
1158 			       struct nlmsghdr *nlh,
1159 			       struct netlink_ext_ack *extack)
1160 {
1161 	struct net *net = sock_net(skb->sk);
1162 	struct genlmsghdr *hdr = nlmsg_data(nlh);
1163 	struct genl_split_ops op;
1164 	int hdrlen;
1165 	u8 flags;
1166 
1167 	/* this family doesn't exist in this netns */
1168 	if (!family->netnsok && !net_eq(net, &init_net))
1169 		return -ENOENT;
1170 
1171 	hdrlen = GENL_HDRLEN + family->hdrsize;
1172 	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
1173 		return -EINVAL;
1174 
1175 	if (genl_header_check(family, nlh, hdr, extack))
1176 		return -EINVAL;
1177 
1178 	flags = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP ?
1179 		GENL_CMD_CAP_DUMP : GENL_CMD_CAP_DO;
1180 	if (genl_get_cmd(hdr->cmd, flags, family, &op))
1181 		return -EOPNOTSUPP;
1182 
1183 	if ((op.flags & GENL_ADMIN_PERM) &&
1184 	    !netlink_capable(skb, CAP_NET_ADMIN))
1185 		return -EPERM;
1186 
1187 	if ((op.flags & GENL_UNS_ADMIN_PERM) &&
1188 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1189 		return -EPERM;
1190 
1191 	if (flags & GENL_CMD_CAP_DUMP)
1192 		return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
1193 						  &op, hdrlen, net);
1194 	else
1195 		return genl_family_rcv_msg_doit(family, skb, nlh, extack,
1196 						&op, hdrlen, net);
1197 }
1198 
1199 static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
1200 			struct netlink_ext_ack *extack)
1201 {
1202 	const struct genl_family *family;
1203 	int err;
1204 
1205 	family = genl_family_find_byid(nlh->nlmsg_type);
1206 	if (family == NULL)
1207 		return -ENOENT;
1208 
1209 	genl_op_lock(family);
1210 	err = genl_family_rcv_msg(family, skb, nlh, extack);
1211 	genl_op_unlock(family);
1212 
1213 	return err;
1214 }
1215 
1216 static void genl_rcv(struct sk_buff *skb)
1217 {
1218 	down_read(&cb_lock);
1219 	netlink_rcv_skb(skb, &genl_rcv_msg);
1220 	up_read(&cb_lock);
1221 }
1222 
1223 /**************************************************************************
1224  * Controller
1225  **************************************************************************/
1226 
1227 static struct genl_family genl_ctrl;
1228 
1229 static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
1230 			  u32 flags, struct sk_buff *skb, u8 cmd)
1231 {
1232 	struct genl_op_iter i;
1233 	void *hdr;
1234 
1235 	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
1236 	if (hdr == NULL)
1237 		return -EMSGSIZE;
1238 
1239 	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
1240 	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
1241 	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
1242 	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
1243 	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
1244 		goto nla_put_failure;
1245 
1246 	if (genl_op_iter_init(family, &i)) {
1247 		struct nlattr *nla_ops;
1248 
1249 		nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
1250 		if (nla_ops == NULL)
1251 			goto nla_put_failure;
1252 
1253 		while (genl_op_iter_next(&i)) {
1254 			struct nlattr *nest;
1255 			u32 op_flags;
1256 
1257 			op_flags = i.flags;
1258 			if (i.doit.policy || i.dumpit.policy)
1259 				op_flags |= GENL_CMD_CAP_HASPOL;
1260 
1261 			nest = nla_nest_start_noflag(skb, genl_op_iter_idx(&i));
1262 			if (nest == NULL)
1263 				goto nla_put_failure;
1264 
1265 			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, i.cmd) ||
1266 			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
1267 				goto nla_put_failure;
1268 
1269 			nla_nest_end(skb, nest);
1270 		}
1271 
1272 		nla_nest_end(skb, nla_ops);
1273 	}
1274 
1275 	if (family->n_mcgrps) {
1276 		struct nlattr *nla_grps;
1277 		int i;
1278 
1279 		nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
1280 		if (nla_grps == NULL)
1281 			goto nla_put_failure;
1282 
1283 		for (i = 0; i < family->n_mcgrps; i++) {
1284 			struct nlattr *nest;
1285 			const struct genl_multicast_group *grp;
1286 
1287 			grp = &family->mcgrps[i];
1288 
1289 			nest = nla_nest_start_noflag(skb, i + 1);
1290 			if (nest == NULL)
1291 				goto nla_put_failure;
1292 
1293 			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
1294 					family->mcgrp_offset + i) ||
1295 			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
1296 					   grp->name))
1297 				goto nla_put_failure;
1298 
1299 			nla_nest_end(skb, nest);
1300 		}
1301 		nla_nest_end(skb, nla_grps);
1302 	}
1303 
1304 	genlmsg_end(skb, hdr);
1305 	return 0;
1306 
1307 nla_put_failure:
1308 	genlmsg_cancel(skb, hdr);
1309 	return -EMSGSIZE;
1310 }
1311 
1312 static int ctrl_fill_mcgrp_info(const struct genl_family *family,
1313 				const struct genl_multicast_group *grp,
1314 				int grp_id, u32 portid, u32 seq, u32 flags,
1315 				struct sk_buff *skb, u8 cmd)
1316 {
1317 	void *hdr;
1318 	struct nlattr *nla_grps;
1319 	struct nlattr *nest;
1320 
1321 	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
1322 	if (hdr == NULL)
1323 		return -1;
1324 
1325 	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
1326 	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
1327 		goto nla_put_failure;
1328 
1329 	nla_grps = nla_nest_start_noflag(skb, CTRL_ATTR_MCAST_GROUPS);
1330 	if (nla_grps == NULL)
1331 		goto nla_put_failure;
1332 
1333 	nest = nla_nest_start_noflag(skb, 1);
1334 	if (nest == NULL)
1335 		goto nla_put_failure;
1336 
1337 	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
1338 	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
1339 			   grp->name))
1340 		goto nla_put_failure;
1341 
1342 	nla_nest_end(skb, nest);
1343 	nla_nest_end(skb, nla_grps);
1344 
1345 	genlmsg_end(skb, hdr);
1346 	return 0;
1347 
1348 nla_put_failure:
1349 	genlmsg_cancel(skb, hdr);
1350 	return -EMSGSIZE;
1351 }
1352 
1353 static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
1354 {
1355 	int n = 0;
1356 	struct genl_family *rt;
1357 	struct net *net = sock_net(skb->sk);
1358 	int fams_to_skip = cb->args[0];
1359 	unsigned int id;
1360 	int err = 0;
1361 
1362 	idr_for_each_entry(&genl_fam_idr, rt, id) {
1363 		if (!rt->netnsok && !net_eq(net, &init_net))
1364 			continue;
1365 
1366 		if (n++ < fams_to_skip)
1367 			continue;
1368 
1369 		err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
1370 				     cb->nlh->nlmsg_seq, NLM_F_MULTI,
1371 				     skb, CTRL_CMD_NEWFAMILY);
1372 		if (err) {
1373 			n--;
1374 			break;
1375 		}
1376 	}
1377 
1378 	cb->args[0] = n;
1379 	return err;
1380 }
1381 
1382 static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
1383 					     u32 portid, int seq, u8 cmd)
1384 {
1385 	struct sk_buff *skb;
1386 	int err;
1387 
1388 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1389 	if (skb == NULL)
1390 		return ERR_PTR(-ENOBUFS);
1391 
1392 	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
1393 	if (err < 0) {
1394 		nlmsg_free(skb);
1395 		return ERR_PTR(err);
1396 	}
1397 
1398 	return skb;
1399 }
1400 
1401 static struct sk_buff *
1402 ctrl_build_mcgrp_msg(const struct genl_family *family,
1403 		     const struct genl_multicast_group *grp,
1404 		     int grp_id, u32 portid, int seq, u8 cmd)
1405 {
1406 	struct sk_buff *skb;
1407 	int err;
1408 
1409 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1410 	if (skb == NULL)
1411 		return ERR_PTR(-ENOBUFS);
1412 
1413 	err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
1414 				   seq, 0, skb, cmd);
1415 	if (err < 0) {
1416 		nlmsg_free(skb);
1417 		return ERR_PTR(err);
1418 	}
1419 
1420 	return skb;
1421 }
1422 
1423 static const struct nla_policy ctrl_policy_family[] = {
1424 	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
1425 	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
1426 				    .len = GENL_NAMSIZ - 1 },
1427 };
1428 
1429 static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
1430 {
1431 	struct sk_buff *msg;
1432 	const struct genl_family *res = NULL;
1433 	int err = -EINVAL;
1434 
1435 	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
1436 		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
1437 		res = genl_family_find_byid(id);
1438 		err = -ENOENT;
1439 	}
1440 
1441 	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
1442 		char *name;
1443 
1444 		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
1445 		res = genl_family_find_byname(name);
1446 #ifdef CONFIG_MODULES
1447 		if (res == NULL) {
1448 			genl_unlock();
1449 			up_read(&cb_lock);
1450 			request_module("net-pf-%d-proto-%d-family-%s",
1451 				       PF_NETLINK, NETLINK_GENERIC, name);
1452 			down_read(&cb_lock);
1453 			genl_lock();
1454 			res = genl_family_find_byname(name);
1455 		}
1456 #endif
1457 		err = -ENOENT;
1458 	}
1459 
1460 	if (res == NULL)
1461 		return err;
1462 
1463 	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
1464 		/* family doesn't exist here */
1465 		return -ENOENT;
1466 	}
1467 
1468 	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
1469 				    CTRL_CMD_NEWFAMILY);
1470 	if (IS_ERR(msg))
1471 		return PTR_ERR(msg);
1472 
1473 	return genlmsg_reply(msg, info);
1474 }
1475 
1476 static int genl_ctrl_event(int event, const struct genl_family *family,
1477 			   const struct genl_multicast_group *grp,
1478 			   int grp_id)
1479 {
1480 	struct sk_buff *msg;
1481 
1482 	/* genl is still initialising */
1483 	if (!init_net.genl_sock)
1484 		return 0;
1485 
1486 	switch (event) {
1487 	case CTRL_CMD_NEWFAMILY:
1488 	case CTRL_CMD_DELFAMILY:
1489 		WARN_ON(grp);
1490 		msg = ctrl_build_family_msg(family, 0, 0, event);
1491 		break;
1492 	case CTRL_CMD_NEWMCAST_GRP:
1493 	case CTRL_CMD_DELMCAST_GRP:
1494 		BUG_ON(!grp);
1495 		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
1496 		break;
1497 	default:
1498 		return -EINVAL;
1499 	}
1500 
1501 	if (IS_ERR(msg))
1502 		return PTR_ERR(msg);
1503 
1504 	if (!family->netnsok) {
1505 		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
1506 					0, GFP_KERNEL);
1507 	} else {
1508 		rcu_read_lock();
1509 		genlmsg_multicast_allns(&genl_ctrl, msg, 0,
1510 					0, GFP_ATOMIC);
1511 		rcu_read_unlock();
1512 	}
1513 
1514 	return 0;
1515 }
1516 
1517 struct ctrl_dump_policy_ctx {
1518 	struct netlink_policy_dump_state *state;
1519 	const struct genl_family *rt;
1520 	struct genl_op_iter *op_iter;
1521 	u32 op;
1522 	u16 fam_id;
1523 	u8 dump_map:1,
1524 	   single_op:1;
1525 };
1526 
1527 static const struct nla_policy ctrl_policy_policy[] = {
1528 	[CTRL_ATTR_FAMILY_ID]	= { .type = NLA_U16 },
1529 	[CTRL_ATTR_FAMILY_NAME]	= { .type = NLA_NUL_STRING,
1530 				    .len = GENL_NAMSIZ - 1 },
1531 	[CTRL_ATTR_OP]		= { .type = NLA_U32 },
1532 };
1533 
1534 static int ctrl_dumppolicy_start(struct netlink_callback *cb)
1535 {
1536 	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
1537 	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1538 	struct nlattr **tb = info->info.attrs;
1539 	const struct genl_family *rt;
1540 	struct genl_op_iter i;
1541 	int err;
1542 
1543 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
1544 
1545 	if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
1546 		return -EINVAL;
1547 
1548 	if (tb[CTRL_ATTR_FAMILY_ID]) {
1549 		ctx->fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
1550 	} else {
1551 		rt = genl_family_find_byname(
1552 			nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
1553 		if (!rt)
1554 			return -ENOENT;
1555 		ctx->fam_id = rt->id;
1556 	}
1557 
1558 	rt = genl_family_find_byid(ctx->fam_id);
1559 	if (!rt)
1560 		return -ENOENT;
1561 
1562 	ctx->rt = rt;
1563 
1564 	if (tb[CTRL_ATTR_OP]) {
1565 		struct genl_split_ops doit, dump;
1566 
1567 		ctx->single_op = true;
1568 		ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);
1569 
1570 		err = genl_get_cmd_both(ctx->op, rt, &doit, &dump);
1571 		if (err) {
1572 			NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
1573 			return err;
1574 		}
1575 
1576 		if (doit.policy) {
1577 			err = netlink_policy_dump_add_policy(&ctx->state,
1578 							     doit.policy,
1579 							     doit.maxattr);
1580 			if (err)
1581 				goto err_free_state;
1582 		}
1583 		if (dump.policy) {
1584 			err = netlink_policy_dump_add_policy(&ctx->state,
1585 							     dump.policy,
1586 							     dump.maxattr);
1587 			if (err)
1588 				goto err_free_state;
1589 		}
1590 
1591 		if (!ctx->state)
1592 			return -ENODATA;
1593 
1594 		ctx->dump_map = 1;
1595 		return 0;
1596 	}
1597 
1598 	ctx->op_iter = kmalloc(sizeof(*ctx->op_iter), GFP_KERNEL);
1599 	if (!ctx->op_iter)
1600 		return -ENOMEM;
1601 
1602 	genl_op_iter_init(rt, ctx->op_iter);
1603 	ctx->dump_map = genl_op_iter_next(ctx->op_iter);
1604 
1605 	for (genl_op_iter_init(rt, &i); genl_op_iter_next(&i); ) {
1606 		if (i.doit.policy) {
1607 			err = netlink_policy_dump_add_policy(&ctx->state,
1608 							     i.doit.policy,
1609 							     i.doit.maxattr);
1610 			if (err)
1611 				goto err_free_state;
1612 		}
1613 		if (i.dumpit.policy) {
1614 			err = netlink_policy_dump_add_policy(&ctx->state,
1615 							     i.dumpit.policy,
1616 							     i.dumpit.maxattr);
1617 			if (err)
1618 				goto err_free_state;
1619 		}
1620 	}
1621 
1622 	if (!ctx->state) {
1623 		err = -ENODATA;
1624 		goto err_free_op_iter;
1625 	}
1626 	return 0;
1627 
1628 err_free_state:
1629 	netlink_policy_dump_free(ctx->state);
1630 err_free_op_iter:
1631 	kfree(ctx->op_iter);
1632 	return err;
1633 }
1634 
1635 static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
1636 				  struct netlink_callback *cb)
1637 {
1638 	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1639 	void *hdr;
1640 
1641 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
1642 			  cb->nlh->nlmsg_seq, &genl_ctrl,
1643 			  NLM_F_MULTI, CTRL_CMD_GETPOLICY);
1644 	if (!hdr)
1645 		return NULL;
1646 
1647 	if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, ctx->fam_id))
1648 		return NULL;
1649 
1650 	return hdr;
1651 }
1652 
1653 static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
1654 				  struct netlink_callback *cb,
1655 				  struct genl_split_ops *doit,
1656 				  struct genl_split_ops *dumpit)
1657 {
1658 	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1659 	struct nlattr *nest_pol, *nest_op;
1660 	void *hdr;
1661 	int idx;
1662 
1663 	/* skip if we have nothing to show */
1664 	if (!doit->policy && !dumpit->policy)
1665 		return 0;
1666 
1667 	hdr = ctrl_dumppolicy_prep(skb, cb);
1668 	if (!hdr)
1669 		return -ENOBUFS;
1670 
1671 	nest_pol = nla_nest_start(skb, CTRL_ATTR_OP_POLICY);
1672 	if (!nest_pol)
1673 		goto err;
1674 
1675 	nest_op = nla_nest_start(skb, doit->cmd);
1676 	if (!nest_op)
1677 		goto err;
1678 
1679 	if (doit->policy) {
1680 		idx = netlink_policy_dump_get_policy_idx(ctx->state,
1681 							 doit->policy,
1682 							 doit->maxattr);
1683 
1684 		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
1685 			goto err;
1686 	}
1687 	if (dumpit->policy) {
1688 		idx = netlink_policy_dump_get_policy_idx(ctx->state,
1689 							 dumpit->policy,
1690 							 dumpit->maxattr);
1691 
1692 		if (nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
1693 			goto err;
1694 	}
1695 
1696 	nla_nest_end(skb, nest_op);
1697 	nla_nest_end(skb, nest_pol);
1698 	genlmsg_end(skb, hdr);
1699 
1700 	return 0;
1701 err:
1702 	genlmsg_cancel(skb, hdr);
1703 	return -ENOBUFS;
1704 }
1705 
1706 static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
1707 {
1708 	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1709 	void *hdr;
1710 
1711 	if (ctx->dump_map) {
1712 		if (ctx->single_op) {
1713 			struct genl_split_ops doit, dumpit;
1714 
1715 			if (WARN_ON(genl_get_cmd_both(ctx->op, ctx->rt,
1716 						      &doit, &dumpit)))
1717 				return -ENOENT;
1718 
1719 			if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
1720 				return skb->len;
1721 
1722 			/* done with the per-op policy index list */
1723 			ctx->dump_map = 0;
1724 		}
1725 
1726 		while (ctx->dump_map) {
1727 			if (ctrl_dumppolicy_put_op(skb, cb,
1728 						   &ctx->op_iter->doit,
1729 						   &ctx->op_iter->dumpit))
1730 				return skb->len;
1731 
1732 			ctx->dump_map = genl_op_iter_next(ctx->op_iter);
1733 		}
1734 	}
1735 
1736 	while (netlink_policy_dump_loop(ctx->state)) {
1737 		struct nlattr *nest;
1738 
1739 		hdr = ctrl_dumppolicy_prep(skb, cb);
1740 		if (!hdr)
1741 			goto nla_put_failure;
1742 
1743 		nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
1744 		if (!nest)
1745 			goto nla_put_failure;
1746 
1747 		if (netlink_policy_dump_write(skb, ctx->state))
1748 			goto nla_put_failure;
1749 
1750 		nla_nest_end(skb, nest);
1751 
1752 		genlmsg_end(skb, hdr);
1753 	}
1754 
1755 	return skb->len;
1756 
1757 nla_put_failure:
1758 	genlmsg_cancel(skb, hdr);
1759 	return skb->len;
1760 }
1761 
1762 static int ctrl_dumppolicy_done(struct netlink_callback *cb)
1763 {
1764 	struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
1765 
1766 	kfree(ctx->op_iter);
1767 	netlink_policy_dump_free(ctx->state);
1768 	return 0;
1769 }
1770 
1771 static const struct genl_split_ops genl_ctrl_ops[] = {
1772 	{
1773 		.cmd		= CTRL_CMD_GETFAMILY,
1774 		.validate	= GENL_DONT_VALIDATE_STRICT,
1775 		.policy		= ctrl_policy_family,
1776 		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
1777 		.doit		= ctrl_getfamily,
1778 		.flags		= GENL_CMD_CAP_DO,
1779 	},
1780 	{
1781 		.cmd		= CTRL_CMD_GETFAMILY,
1782 		.validate	= GENL_DONT_VALIDATE_DUMP,
1783 		.policy		= ctrl_policy_family,
1784 		.maxattr	= ARRAY_SIZE(ctrl_policy_family) - 1,
1785 		.dumpit		= ctrl_dumpfamily,
1786 		.flags		= GENL_CMD_CAP_DUMP,
1787 	},
1788 	{
1789 		.cmd		= CTRL_CMD_GETPOLICY,
1790 		.policy		= ctrl_policy_policy,
1791 		.maxattr	= ARRAY_SIZE(ctrl_policy_policy) - 1,
1792 		.start		= ctrl_dumppolicy_start,
1793 		.dumpit		= ctrl_dumppolicy,
1794 		.done		= ctrl_dumppolicy_done,
1795 		.flags		= GENL_CMD_CAP_DUMP,
1796 	},
1797 };
1798 
1799 static const struct genl_multicast_group genl_ctrl_groups[] = {
1800 	{ .name = "notify", },
1801 };
1802 
1803 static struct genl_family genl_ctrl __ro_after_init = {
1804 	.module = THIS_MODULE,
1805 	.split_ops = genl_ctrl_ops,
1806 	.n_split_ops = ARRAY_SIZE(genl_ctrl_ops),
1807 	.resv_start_op = CTRL_CMD_GETPOLICY + 1,
1808 	.mcgrps = genl_ctrl_groups,
1809 	.n_mcgrps = ARRAY_SIZE(genl_ctrl_groups),
1810 	.id = GENL_ID_CTRL,
1811 	.name = "nlctrl",
1812 	.version = 0x2,
1813 	.netnsok = true,
1814 };
1815 
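/*
 * Netlink bind()/unbind() callbacks, invoked when a socket joins or
 * leaves a multicast group (e.g. via NETLINK_ADD_MEMBERSHIP).
 * genl_bind() enforces the per-group GENL_MCAST_CAP_* flags and hands
 * the family-relative group index to the owning family's ->bind();
 * genl_unbind() mirrors this for ->unbind().
 */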
1816 static int genl_bind(struct net *net, int group)
1817 {
1818 	const struct genl_family *family;
1819 	unsigned int id;
1820 	int ret = 0;
1821 
1822 	down_read(&cb_lock);
1823 
1824 	idr_for_each_entry(&genl_fam_idr, family, id) {
1825 		const struct genl_multicast_group *grp;
1826 		int i;
1827 
1828 		if (family->n_mcgrps == 0)
1829 			continue;
1830 
1831 		i = group - family->mcgrp_offset;
1832 		if (i < 0 || i >= family->n_mcgrps)
1833 			continue;
1834 
1835 		grp = &family->mcgrps[i];
1836 		if ((grp->flags & GENL_MCAST_CAP_NET_ADMIN) &&
1837 		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
1838 			ret = -EPERM;
1839 		if ((grp->flags & GENL_MCAST_CAP_SYS_ADMIN) &&
1840 		    !ns_capable(net->user_ns, CAP_SYS_ADMIN))
1841 			ret = -EPERM;
1842 
1843 		if (family->bind)
1844 			family->bind(i);
1845 
1846 		break;
1847 	}
1848 
1849 	up_read(&cb_lock);
1850 	return ret;
1851 }
1852 
1853 static void genl_unbind(struct net *net, int group)
1854 {
1855 	const struct genl_family *family;
1856 	unsigned int id;
1857 
1858 	down_read(&cb_lock);
1859 
1860 	idr_for_each_entry(&genl_fam_idr, family, id) {
1861 		int i;
1862 
1863 		if (family->n_mcgrps == 0)
1864 			continue;
1865 
1866 		i = group - family->mcgrp_offset;
1867 		if (i < 0 || i >= family->n_mcgrps)
1868 			continue;
1869 
1870 		if (family->unbind)
1871 			family->unbind(i);
1872 
1873 		break;
1874 	}
1875 
1876 	up_read(&cb_lock);
1877 }
1878 
1879 static int __net_init genl_pernet_init(struct net *net)
1880 {
1881 	struct netlink_kernel_cfg cfg = {
1882 		.input		= genl_rcv,
1883 		.flags		= NL_CFG_F_NONROOT_RECV,
1884 		.bind		= genl_bind,
1885 		.unbind		= genl_unbind,
1886 		.release	= genl_release,
1887 	};
1888 
1889 	/* we'll bump the group number right afterwards */
1890 	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1891 
1892 	if (!net->genl_sock && net_eq(net, &init_net))
1893 		panic("GENL: Cannot initialize generic netlink\n");
1894 
1895 	if (!net->genl_sock)
1896 		return -ENOMEM;
1897 
1898 	return 0;
1899 }
1900 
1901 static void __net_exit genl_pernet_exit(struct net *net)
1902 {
1903 	netlink_kernel_release(net->genl_sock);
1904 	net->genl_sock = NULL;
1905 }
1906 
1907 static struct pernet_operations genl_pernet_ops = {
1908 	.init = genl_pernet_init,
1909 	.exit = genl_pernet_exit,
1910 };
1911 
1912 static int __init genl_init(void)
1913 {
1914 	int err;
1915 
1916 	err = genl_register_family(&genl_ctrl);
1917 	if (err < 0)
1918 		goto problem;
1919 
1920 	err = register_pernet_subsys(&genl_pernet_ops);
1921 	if (err)
1922 		goto problem;
1923 
1924 	return 0;
1925 
1926 problem:
1927 	panic("GENL: Cannot register controller: %d\n", err);
1928 }
1929 
1930 core_initcall(genl_init);
1931 
1932 static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1933 			 gfp_t flags)
1934 {
1935 	struct sk_buff *tmp;
1936 	struct net *net, *prev = NULL;
1937 	bool delivered = false;
1938 	int err;
1939 
1940 	for_each_net_rcu(net) {
1941 		if (prev) {
1942 			tmp = skb_clone(skb, flags);
1943 			if (!tmp) {
1944 				err = -ENOMEM;
1945 				goto error;
1946 			}
1947 			err = nlmsg_multicast(prev->genl_sock, tmp,
1948 					      portid, group, flags);
1949 			if (!err)
1950 				delivered = true;
1951 			else if (err != -ESRCH)
1952 				goto error;
1953 		}
1954 
1955 		prev = net;
1956 	}
1957 
1958 	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1959 	if (!err)
1960 		delivered = true;
1961 	else if (err != -ESRCH)
1962 		return err;
1963 	return delivered ? 0 : -ESRCH;
1964  error:
1965 	kfree_skb(skb);
1966 	return err;
1967 }
1968 
1969 int genlmsg_multicast_allns(const struct genl_family *family,
1970 			    struct sk_buff *skb, u32 portid,
1971 			    unsigned int group, gfp_t flags)
1972 {
1973 	if (WARN_ON_ONCE(group >= family->n_mcgrps))
1974 		return -EINVAL;
1975 
1976 	group = family->mcgrp_offset + group;
1977 	return genlmsg_mcast(skb, portid, group, flags);
1978 }
1979 EXPORT_SYMBOL(genlmsg_multicast_allns);
1980 
1981 void genl_notify(const struct genl_family *family, struct sk_buff *skb,
1982 		 struct genl_info *info, u32 group, gfp_t flags)
1983 {
1984 	struct net *net = genl_info_net(info);
1985 	struct sock *sk = net->genl_sock;
1986 
1987 	if (WARN_ON_ONCE(group >= family->n_mcgrps))
1988 		return;
1989 
1990 	group = family->mcgrp_offset + group;
1991 	nlmsg_notify(sk, skb, info->snd_portid, group,
1992 		     nlmsg_report(info->nlhdr), flags);
1993 }
1994 EXPORT_SYMBOL(genl_notify);
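
/*
 * Usage sketch (illustrative only, hence __maybe_unused): sending an
 * already-built event message.  @group is the family-relative index
 * into ->mcgrps, not the global multicast group ID; the offset is
 * applied internally.  When the event was triggered by a request,
 * genl_notify() additionally honours NLM_F_ECHO for the requester.
 */
static int __maybe_unused
genl_example_send_event(const struct genl_family *family, struct sk_buff *msg,
			struct genl_info *info, unsigned int group)
{
	if (info) {
		genl_notify(family, msg, info, group, GFP_KERNEL);
		return 0;
	}

	return genlmsg_multicast(family, msg, 0, group, GFP_KERNEL);
}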
1995