xref: /linux/net/netlink/af_netlink.c (revision 52990390f91c1c39ca742fc8f390b29891d95127)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * NETLINK      Kernel-user communication protocol.
4  *
5  * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
6  * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7  * 				Patrick McHardy <kaber@trash.net>
8  *
9  * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
10  *                               added netlink_proto_exit
11  * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
12  * 				 use nlk_sk, as sk->protinfo is on a diet 8)
13  * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
14  * 				 - inc module use count of module that owns
15  * 				   the kernel socket in case userspace opens
16  * 				   socket of same protocol
17  * 				 - remove all module support, since netlink is
18  * 				   mandatory if CONFIG_NET=y these days
19  */
20 
21 #include <linux/module.h>
22 
23 #include <linux/bpf.h>
24 #include <linux/capability.h>
25 #include <linux/kernel.h>
26 #include <linux/filter.h>
27 #include <linux/init.h>
28 #include <linux/signal.h>
29 #include <linux/sched.h>
30 #include <linux/errno.h>
31 #include <linux/string.h>
32 #include <linux/stat.h>
33 #include <linux/socket.h>
34 #include <linux/un.h>
35 #include <linux/fcntl.h>
36 #include <linux/termios.h>
37 #include <linux/sockios.h>
38 #include <linux/net.h>
39 #include <linux/fs.h>
40 #include <linux/slab.h>
41 #include <linux/uaccess.h>
42 #include <linux/skbuff.h>
43 #include <linux/netdevice.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/proc_fs.h>
46 #include <linux/seq_file.h>
47 #include <linux/notifier.h>
48 #include <linux/security.h>
49 #include <linux/jhash.h>
50 #include <linux/jiffies.h>
51 #include <linux/random.h>
52 #include <linux/bitops.h>
53 #include <linux/mm.h>
54 #include <linux/types.h>
55 #include <linux/audit.h>
56 #include <linux/mutex.h>
57 #include <linux/vmalloc.h>
58 #include <linux/if_arp.h>
59 #include <linux/rhashtable.h>
60 #include <asm/cacheflush.h>
61 #include <linux/hash.h>
62 #include <linux/genetlink.h>
63 #include <linux/net_namespace.h>
64 #include <linux/nospec.h>
65 #include <linux/btf_ids.h>
66 
67 #include <net/net_namespace.h>
68 #include <net/netns/generic.h>
69 #include <net/sock.h>
70 #include <net/scm.h>
71 #include <net/netlink.h>
72 #define CREATE_TRACE_POINTS
73 #include <trace/events/netlink.h>
74 
75 #include "af_netlink.h"
76 
77 struct listeners {
78 	struct rcu_head		rcu;
79 	unsigned long		masks[];
80 };
81 
82 /* state bits */
83 #define NETLINK_S_CONGESTED		0x0
84 
85 static inline int netlink_is_kernel(struct sock *sk)
86 {
87 	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
88 }
89 
90 struct netlink_table *nl_table __read_mostly;
91 EXPORT_SYMBOL_GPL(nl_table);
92 
93 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
94 
95 static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
96 
97 static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
98 	"nlk_cb_mutex-ROUTE",
99 	"nlk_cb_mutex-1",
100 	"nlk_cb_mutex-USERSOCK",
101 	"nlk_cb_mutex-FIREWALL",
102 	"nlk_cb_mutex-SOCK_DIAG",
103 	"nlk_cb_mutex-NFLOG",
104 	"nlk_cb_mutex-XFRM",
105 	"nlk_cb_mutex-SELINUX",
106 	"nlk_cb_mutex-ISCSI",
107 	"nlk_cb_mutex-AUDIT",
108 	"nlk_cb_mutex-FIB_LOOKUP",
109 	"nlk_cb_mutex-CONNECTOR",
110 	"nlk_cb_mutex-NETFILTER",
111 	"nlk_cb_mutex-IP6_FW",
112 	"nlk_cb_mutex-DNRTMSG",
113 	"nlk_cb_mutex-KOBJECT_UEVENT",
114 	"nlk_cb_mutex-GENERIC",
115 	"nlk_cb_mutex-17",
116 	"nlk_cb_mutex-SCSITRANSPORT",
117 	"nlk_cb_mutex-ECRYPTFS",
118 	"nlk_cb_mutex-RDMA",
119 	"nlk_cb_mutex-CRYPTO",
120 	"nlk_cb_mutex-SMC",
121 	"nlk_cb_mutex-23",
122 	"nlk_cb_mutex-24",
123 	"nlk_cb_mutex-25",
124 	"nlk_cb_mutex-26",
125 	"nlk_cb_mutex-27",
126 	"nlk_cb_mutex-28",
127 	"nlk_cb_mutex-29",
128 	"nlk_cb_mutex-30",
129 	"nlk_cb_mutex-31",
130 	"nlk_cb_mutex-MAX_LINKS"
131 };
132 
133 static int netlink_dump(struct sock *sk);
134 
135 /* nl_table locking explained:
136  * Lookup and traversal are protected with an RCU read-side lock. Insertion
137  * and removal are protected with per bucket lock while using RCU list
138  * modification primitives and may run in parallel to RCU protected lookups.
139  * Destruction of the Netlink socket may only occur *after* nl_table_lock has
140  * been acquired - either during or after the socket has been removed from
141  * the list and after an RCU grace period.
142  */
143 DEFINE_RWLOCK(nl_table_lock);
144 EXPORT_SYMBOL_GPL(nl_table_lock);
145 static atomic_t nl_table_users = ATOMIC_INIT(0);
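/* A minimal sketch of the read-side pattern described above. It mirrors
 * what netlink_lookup() further down actually does; the names here are
 * only illustrative:
 *
 *	rcu_read_lock();
 *	sk = __netlink_lookup(table, portid, net);
 *	if (sk)
 *		sock_hold(sk);		(pin the socket before leaving RCU)
 *	rcu_read_unlock();
 *
 * Taking the reference inside the RCU section is what makes it safe to
 * keep using the socket after rcu_read_unlock(): destruction waits for
 * both the grace period and the reference count to drop.
 */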
146 
147 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
148 
149 static BLOCKING_NOTIFIER_HEAD(netlink_chain);
150 
151 
152 static const struct rhashtable_params netlink_rhashtable_params;
153 
154 void do_trace_netlink_extack(const char *msg)
155 {
156 	trace_netlink_extack(msg);
157 }
158 EXPORT_SYMBOL(do_trace_netlink_extack);
159 
160 static inline u32 netlink_group_mask(u32 group)
161 {
162 	if (group > 32)
163 		return 0;
164 	return group ? 1 << (group - 1) : 0;
165 }
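/* Worked example for netlink_group_mask(): multicast group numbers are
 * 1-based, so group 1 maps to bit 0 (mask 0x1) and group 3 to bit 2
 * (mask 0x4), while group 0 and any group above 32 yield an empty mask.
 * Groups beyond 32 are only reachable through the NETLINK_ADD_MEMBERSHIP
 * setsockopt handled later in this file.
 */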
166 
167 static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
168 					   gfp_t gfp_mask)
169 {
170 	unsigned int len = skb_end_offset(skb);
171 	struct sk_buff *new;
172 
173 	new = alloc_skb(len, gfp_mask);
174 	if (new == NULL)
175 		return NULL;
176 
177 	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
178 	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
179 	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
180 
181 	skb_put_data(new, skb->data, len);
182 	return new;
183 }
184 
185 static unsigned int netlink_tap_net_id;
186 
187 struct netlink_tap_net {
188 	struct list_head netlink_tap_all;
189 	struct mutex netlink_tap_lock;
190 };
191 
192 int netlink_add_tap(struct netlink_tap *nt)
193 {
194 	struct net *net = dev_net(nt->dev);
195 	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
196 
197 	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
198 		return -EINVAL;
199 
200 	mutex_lock(&nn->netlink_tap_lock);
201 	list_add_rcu(&nt->list, &nn->netlink_tap_all);
202 	mutex_unlock(&nn->netlink_tap_lock);
203 
204 	__module_get(nt->module);
205 
206 	return 0;
207 }
208 EXPORT_SYMBOL_GPL(netlink_add_tap);
209 
210 static int __netlink_remove_tap(struct netlink_tap *nt)
211 {
212 	struct net *net = dev_net(nt->dev);
213 	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
214 	bool found = false;
215 	struct netlink_tap *tmp;
216 
217 	mutex_lock(&nn->netlink_tap_lock);
218 
219 	list_for_each_entry(tmp, &nn->netlink_tap_all, list) {
220 		if (nt == tmp) {
221 			list_del_rcu(&nt->list);
222 			found = true;
223 			goto out;
224 		}
225 	}
226 
227 	pr_warn("__netlink_remove_tap: %p not found\n", nt);
228 out:
229 	mutex_unlock(&nn->netlink_tap_lock);
230 
231 	if (found)
232 		module_put(nt->module);
233 
234 	return found ? 0 : -ENODEV;
235 }
236 
237 int netlink_remove_tap(struct netlink_tap *nt)
238 {
239 	int ret;
240 
241 	ret = __netlink_remove_tap(nt);
242 	synchronize_net();
243 
244 	return ret;
245 }
246 EXPORT_SYMBOL_GPL(netlink_remove_tap);
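/* A registration sketch in the style of drivers/net/nlmon.c, the in-tree
 * user of this API. The device must be of type ARPHRD_NETLINK or
 * netlink_add_tap() rejects it; "nt" and "dev" here are hypothetical:
 *
 *	static struct netlink_tap nt;
 *
 *	nt.dev = dev;			(an ARPHRD_NETLINK net_device)
 *	nt.module = THIS_MODULE;
 *	err = netlink_add_tap(&nt);
 *	...
 *	netlink_remove_tap(&nt);	(synchronizes RCU before returning)
 */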
247 
248 static __net_init int netlink_tap_init_net(struct net *net)
249 {
250 	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
251 
252 	INIT_LIST_HEAD(&nn->netlink_tap_all);
253 	mutex_init(&nn->netlink_tap_lock);
254 	return 0;
255 }
256 
257 static struct pernet_operations netlink_tap_net_ops = {
258 	.init = netlink_tap_init_net,
259 	.id   = &netlink_tap_net_id,
260 	.size = sizeof(struct netlink_tap_net),
261 };
262 
263 static bool netlink_filter_tap(const struct sk_buff *skb)
264 {
265 	struct sock *sk = skb->sk;
266 
267 	/* We take the more conservative approach and
268 	 * whitelist socket protocols that may pass.
269 	 */
270 	switch (sk->sk_protocol) {
271 	case NETLINK_ROUTE:
272 	case NETLINK_USERSOCK:
273 	case NETLINK_SOCK_DIAG:
274 	case NETLINK_NFLOG:
275 	case NETLINK_XFRM:
276 	case NETLINK_FIB_LOOKUP:
277 	case NETLINK_NETFILTER:
278 	case NETLINK_GENERIC:
279 		return true;
280 	}
281 
282 	return false;
283 }
284 
285 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
286 				     struct net_device *dev)
287 {
288 	struct sk_buff *nskb;
289 	struct sock *sk = skb->sk;
290 	int ret = -ENOMEM;
291 
292 	if (!net_eq(dev_net(dev), sock_net(sk)))
293 		return 0;
294 
295 	dev_hold(dev);
296 
297 	if (is_vmalloc_addr(skb->head))
298 		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
299 	else
300 		nskb = skb_clone(skb, GFP_ATOMIC);
301 	if (nskb) {
302 		nskb->dev = dev;
303 		nskb->protocol = htons((u16) sk->sk_protocol);
304 		nskb->pkt_type = netlink_is_kernel(sk) ?
305 				 PACKET_KERNEL : PACKET_USER;
306 		skb_reset_network_header(nskb);
307 		ret = dev_queue_xmit(nskb);
308 		if (unlikely(ret > 0))
309 			ret = net_xmit_errno(ret);
310 	}
311 
312 	dev_put(dev);
313 	return ret;
314 }
315 
316 static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
317 {
318 	int ret;
319 	struct netlink_tap *tmp;
320 
321 	if (!netlink_filter_tap(skb))
322 		return;
323 
324 	list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) {
325 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
326 		if (unlikely(ret))
327 			break;
328 	}
329 }
330 
331 static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
332 {
333 	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
334 
335 	rcu_read_lock();
336 
337 	if (unlikely(!list_empty(&nn->netlink_tap_all)))
338 		__netlink_deliver_tap(skb, nn);
339 
340 	rcu_read_unlock();
341 }
342 
343 static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
344 				       struct sk_buff *skb)
345 {
346 	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
347 		netlink_deliver_tap(sock_net(dst), skb);
348 }
349 
350 static void netlink_overrun(struct sock *sk)
351 {
352 	struct netlink_sock *nlk = nlk_sk(sk);
353 
354 	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
355 		if (!test_and_set_bit(NETLINK_S_CONGESTED,
356 				      &nlk_sk(sk)->state)) {
357 			sk->sk_err = ENOBUFS;
358 			sk_error_report(sk);
359 		}
360 	}
361 	atomic_inc(&sk->sk_drops);
362 }
363 
364 static void netlink_rcv_wake(struct sock *sk)
365 {
366 	struct netlink_sock *nlk = nlk_sk(sk);
367 
368 	if (skb_queue_empty_lockless(&sk->sk_receive_queue))
369 		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
370 	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
371 		wake_up_interruptible(&nlk->wait);
372 }
373 
374 static void netlink_skb_destructor(struct sk_buff *skb)
375 {
376 	if (is_vmalloc_addr(skb->head)) {
377 		if (!skb->cloned ||
378 		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
379 			vfree(skb->head);
380 
381 		skb->head = NULL;
382 	}
383 	if (skb->sk != NULL)
384 		sock_rfree(skb);
385 }
386 
387 static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
388 {
389 	WARN_ON(skb->sk != NULL);
390 	skb->sk = sk;
391 	skb->destructor = netlink_skb_destructor;
392 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
393 	sk_mem_charge(sk, skb->truesize);
394 }
395 
396 static void netlink_sock_destruct(struct sock *sk)
397 {
398 	struct netlink_sock *nlk = nlk_sk(sk);
399 
400 	if (nlk->cb_running) {
401 		if (nlk->cb.done)
402 			nlk->cb.done(&nlk->cb);
403 		module_put(nlk->cb.module);
404 		kfree_skb(nlk->cb.skb);
405 	}
406 
407 	skb_queue_purge(&sk->sk_receive_queue);
408 
409 	if (!sock_flag(sk, SOCK_DEAD)) {
410 		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
411 		return;
412 	}
413 
414 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
415 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
416 	WARN_ON(nlk_sk(sk)->groups);
417 }
418 
419 static void netlink_sock_destruct_work(struct work_struct *work)
420 {
421 	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
422 						work);
423 
424 	sk_free(&nlk->sk);
425 }
426 
427 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
428  * SMP. Look, when several writers sleep and a reader wakes them up, all but
429  * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
430  * solves this, _but_ remember, it adds useless work on UP machines.
431  */
432 
433 void netlink_table_grab(void)
434 	__acquires(nl_table_lock)
435 {
436 	might_sleep();
437 
438 	write_lock_irq(&nl_table_lock);
439 
440 	if (atomic_read(&nl_table_users)) {
441 		DECLARE_WAITQUEUE(wait, current);
442 
443 		add_wait_queue_exclusive(&nl_table_wait, &wait);
444 		for (;;) {
445 			set_current_state(TASK_UNINTERRUPTIBLE);
446 			if (atomic_read(&nl_table_users) == 0)
447 				break;
448 			write_unlock_irq(&nl_table_lock);
449 			schedule();
450 			write_lock_irq(&nl_table_lock);
451 		}
452 
453 		__set_current_state(TASK_RUNNING);
454 		remove_wait_queue(&nl_table_wait, &wait);
455 	}
456 }
457 
458 void netlink_table_ungrab(void)
459 	__releases(nl_table_lock)
460 {
461 	write_unlock_irq(&nl_table_lock);
462 	wake_up(&nl_table_wait);
463 }
464 
465 static inline void
466 netlink_lock_table(void)
467 {
468 	unsigned long flags;
469 
470 	/* read_lock() synchronizes us to netlink_table_grab */
471 
472 	read_lock_irqsave(&nl_table_lock, flags);
473 	atomic_inc(&nl_table_users);
474 	read_unlock_irqrestore(&nl_table_lock, flags);
475 }
476 
477 static inline void
478 netlink_unlock_table(void)
479 {
480 	if (atomic_dec_and_test(&nl_table_users))
481 		wake_up(&nl_table_wait);
482 }
483 
484 struct netlink_compare_arg
485 {
486 	possible_net_t pnet;
487 	u32 portid;
488 };
489 
490 /* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
491 #define netlink_compare_arg_len \
492 	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
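/* Worked example for the comment above, assuming a 64-bit build with
 * CONFIG_NET_NS=y: possible_net_t is then a single pointer, so
 * offsetof(struct netlink_compare_arg, portid) is 8 and the key length
 * is 8 + 4 = 12, whereas sizeof(struct netlink_compare_arg) is padded
 * up to 16, i.e. the 4 extra bytes the comment warns about.
 */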
493 
494 static inline int netlink_compare(struct rhashtable_compare_arg *arg,
495 				  const void *ptr)
496 {
497 	const struct netlink_compare_arg *x = arg->key;
498 	const struct netlink_sock *nlk = ptr;
499 
500 	return nlk->portid != x->portid ||
501 	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
502 }
503 
504 static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
505 				     struct net *net, u32 portid)
506 {
507 	memset(arg, 0, sizeof(*arg));
508 	write_pnet(&arg->pnet, net);
509 	arg->portid = portid;
510 }
511 
512 static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
513 				     struct net *net)
514 {
515 	struct netlink_compare_arg arg;
516 
517 	netlink_compare_arg_init(&arg, net, portid);
518 	return rhashtable_lookup_fast(&table->hash, &arg,
519 				      netlink_rhashtable_params);
520 }
521 
522 static int __netlink_insert(struct netlink_table *table, struct sock *sk)
523 {
524 	struct netlink_compare_arg arg;
525 
526 	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
527 	return rhashtable_lookup_insert_key(&table->hash, &arg,
528 					    &nlk_sk(sk)->node,
529 					    netlink_rhashtable_params);
530 }
531 
532 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
533 {
534 	struct netlink_table *table = &nl_table[protocol];
535 	struct sock *sk;
536 
537 	rcu_read_lock();
538 	sk = __netlink_lookup(table, portid, net);
539 	if (sk)
540 		sock_hold(sk);
541 	rcu_read_unlock();
542 
543 	return sk;
544 }
545 
546 static const struct proto_ops netlink_ops;
547 
548 static void
549 netlink_update_listeners(struct sock *sk)
550 {
551 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
552 	unsigned long mask;
553 	unsigned int i;
554 	struct listeners *listeners;
555 
556 	listeners = nl_deref_protected(tbl->listeners);
557 	if (!listeners)
558 		return;
559 
560 	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
561 		mask = 0;
562 		sk_for_each_bound(sk, &tbl->mc_list) {
563 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
564 				mask |= nlk_sk(sk)->groups[i];
565 		}
566 		listeners->masks[i] = mask;
567 	}
568 	/* this function is only called with the netlink table "grabbed", which
569 	 * makes sure updates are visible before bind or setsockopt return. */
570 }
571 
572 static int netlink_insert(struct sock *sk, u32 portid)
573 {
574 	struct netlink_table *table = &nl_table[sk->sk_protocol];
575 	int err;
576 
577 	lock_sock(sk);
578 
579 	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
580 	if (nlk_sk(sk)->bound)
581 		goto err;
582 
583 	/* portid can be read locklessly from netlink_getname(). */
584 	WRITE_ONCE(nlk_sk(sk)->portid, portid);
585 
586 	sock_hold(sk);
587 
588 	err = __netlink_insert(table, sk);
589 	if (err) {
590 		/* In case the hashtable backend returns with -EBUSY
591 		 * from here, it must not escape to the caller.
592 		 */
593 		if (unlikely(err == -EBUSY))
594 			err = -EOVERFLOW;
595 		if (err == -EEXIST)
596 			err = -EADDRINUSE;
597 		sock_put(sk);
598 		goto err;
599 	}
600 
601 	/* We need to ensure that the socket is hashed and visible. */
602 	smp_wmb();
603 	/* Paired with lockless reads from netlink_bind(),
604 	 * netlink_connect() and netlink_sendmsg().
605 	 */
606 	WRITE_ONCE(nlk_sk(sk)->bound, portid);
607 
608 err:
609 	release_sock(sk);
610 	return err;
611 }
612 
613 static void netlink_remove(struct sock *sk)
614 {
615 	struct netlink_table *table;
616 
617 	table = &nl_table[sk->sk_protocol];
618 	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
619 				    netlink_rhashtable_params)) {
620 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
621 		__sock_put(sk);
622 	}
623 
624 	netlink_table_grab();
625 	if (nlk_sk(sk)->subscriptions) {
626 		__sk_del_bind_node(sk);
627 		netlink_update_listeners(sk);
628 	}
629 	if (sk->sk_protocol == NETLINK_GENERIC)
630 		atomic_inc(&genl_sk_destructing_cnt);
631 	netlink_table_ungrab();
632 }
633 
634 static struct proto netlink_proto = {
635 	.name	  = "NETLINK",
636 	.owner	  = THIS_MODULE,
637 	.obj_size = sizeof(struct netlink_sock),
638 };
639 
640 static int __netlink_create(struct net *net, struct socket *sock,
641 			    struct mutex *cb_mutex, int protocol,
642 			    int kern)
643 {
644 	struct sock *sk;
645 	struct netlink_sock *nlk;
646 
647 	sock->ops = &netlink_ops;
648 
649 	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
650 	if (!sk)
651 		return -ENOMEM;
652 
653 	sock_init_data(sock, sk);
654 
655 	nlk = nlk_sk(sk);
656 	if (cb_mutex) {
657 		nlk->cb_mutex = cb_mutex;
658 	} else {
659 		nlk->cb_mutex = &nlk->cb_def_mutex;
660 		mutex_init(nlk->cb_mutex);
661 		lockdep_set_class_and_name(nlk->cb_mutex,
662 					   nlk_cb_mutex_keys + protocol,
663 					   nlk_cb_mutex_key_strings[protocol]);
664 	}
665 	init_waitqueue_head(&nlk->wait);
666 
667 	sk->sk_destruct = netlink_sock_destruct;
668 	sk->sk_protocol = protocol;
669 	return 0;
670 }
671 
672 static int netlink_create(struct net *net, struct socket *sock, int protocol,
673 			  int kern)
674 {
675 	struct module *module = NULL;
676 	struct mutex *cb_mutex;
677 	struct netlink_sock *nlk;
678 	int (*bind)(struct net *net, int group);
679 	void (*unbind)(struct net *net, int group);
680 	int err = 0;
681 
682 	sock->state = SS_UNCONNECTED;
683 
684 	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
685 		return -ESOCKTNOSUPPORT;
686 
687 	if (protocol < 0 || protocol >= MAX_LINKS)
688 		return -EPROTONOSUPPORT;
689 	protocol = array_index_nospec(protocol, MAX_LINKS);
690 
691 	netlink_lock_table();
692 #ifdef CONFIG_MODULES
693 	if (!nl_table[protocol].registered) {
694 		netlink_unlock_table();
695 		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
696 		netlink_lock_table();
697 	}
698 #endif
699 	if (nl_table[protocol].registered &&
700 	    try_module_get(nl_table[protocol].module))
701 		module = nl_table[protocol].module;
702 	else
703 		err = -EPROTONOSUPPORT;
704 	cb_mutex = nl_table[protocol].cb_mutex;
705 	bind = nl_table[protocol].bind;
706 	unbind = nl_table[protocol].unbind;
707 	netlink_unlock_table();
708 
709 	if (err < 0)
710 		goto out;
711 
712 	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
713 	if (err < 0)
714 		goto out_module;
715 
716 	sock_prot_inuse_add(net, &netlink_proto, 1);
717 
718 	nlk = nlk_sk(sock->sk);
719 	nlk->module = module;
720 	nlk->netlink_bind = bind;
721 	nlk->netlink_unbind = unbind;
722 out:
723 	return err;
724 
725 out_module:
726 	module_put(module);
727 	goto out;
728 }
729 
730 static void deferred_put_nlk_sk(struct rcu_head *head)
731 {
732 	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
733 	struct sock *sk = &nlk->sk;
734 
735 	kfree(nlk->groups);
736 	nlk->groups = NULL;
737 
738 	if (!refcount_dec_and_test(&sk->sk_refcnt))
739 		return;
740 
741 	if (nlk->cb_running && nlk->cb.done) {
742 		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
743 		schedule_work(&nlk->work);
744 		return;
745 	}
746 
747 	sk_free(sk);
748 }
749 
750 static int netlink_release(struct socket *sock)
751 {
752 	struct sock *sk = sock->sk;
753 	struct netlink_sock *nlk;
754 
755 	if (!sk)
756 		return 0;
757 
758 	netlink_remove(sk);
759 	sock_orphan(sk);
760 	nlk = nlk_sk(sk);
761 
762 	/*
763 	 * OK. Socket is unlinked, any packets that arrive now
764 	 * will be purged.
765 	 */
766 
767 	/* must not acquire netlink_table_lock in any way again before unbind
768 	 * and notifying genetlink is done as otherwise it might deadlock
769 	 */
770 	if (nlk->netlink_unbind) {
771 		int i;
772 
773 		for (i = 0; i < nlk->ngroups; i++)
774 			if (test_bit(i, nlk->groups))
775 				nlk->netlink_unbind(sock_net(sk), i + 1);
776 	}
777 	if (sk->sk_protocol == NETLINK_GENERIC &&
778 	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
779 		wake_up(&genl_sk_destructing_waitq);
780 
781 	sock->sk = NULL;
782 	wake_up_interruptible_all(&nlk->wait);
783 
784 	skb_queue_purge(&sk->sk_write_queue);
785 
786 	if (nlk->portid && nlk->bound) {
787 		struct netlink_notify n = {
788 						.net = sock_net(sk),
789 						.protocol = sk->sk_protocol,
790 						.portid = nlk->portid,
791 					  };
792 		blocking_notifier_call_chain(&netlink_chain,
793 				NETLINK_URELEASE, &n);
794 	}
795 
796 	module_put(nlk->module);
797 
798 	if (netlink_is_kernel(sk)) {
799 		netlink_table_grab();
800 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
801 		if (--nl_table[sk->sk_protocol].registered == 0) {
802 			struct listeners *old;
803 
804 			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
805 			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
806 			kfree_rcu(old, rcu);
807 			nl_table[sk->sk_protocol].module = NULL;
808 			nl_table[sk->sk_protocol].bind = NULL;
809 			nl_table[sk->sk_protocol].unbind = NULL;
810 			nl_table[sk->sk_protocol].flags = 0;
811 			nl_table[sk->sk_protocol].registered = 0;
812 		}
813 		netlink_table_ungrab();
814 	}
815 
816 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
817 
818 	/* Because struct net might disappear soon, do not keep a pointer. */
819 	if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) {
820 		__netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
821 		/* Because of deferred_put_nlk_sk and use of work queue,
822 		 * it is possible that the netns will be freed before this socket.
823 		 */
824 		sock_net_set(sk, &init_net);
825 		__netns_tracker_alloc(&init_net, &sk->ns_tracker,
826 				      false, GFP_KERNEL);
827 	}
828 	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
829 	return 0;
830 }
831 
832 static int netlink_autobind(struct socket *sock)
833 {
834 	struct sock *sk = sock->sk;
835 	struct net *net = sock_net(sk);
836 	struct netlink_table *table = &nl_table[sk->sk_protocol];
837 	s32 portid = task_tgid_vnr(current);
838 	int err;
839 	s32 rover = -4096;
840 	bool ok;
841 
842 retry:
843 	cond_resched();
844 	rcu_read_lock();
845 	ok = !__netlink_lookup(table, portid, net);
846 	rcu_read_unlock();
847 	if (!ok) {
848 		/* Bind collision, search negative portid values. */
849 		if (rover == -4096)
850 			/* rover will be in range [S32_MIN, -4097] */
851 			rover = S32_MIN + get_random_u32_below(-4096 - S32_MIN);
852 		else if (rover >= -4096)
853 			rover = -4097;
854 		portid = rover--;
855 		goto retry;
856 	}
857 
858 	err = netlink_insert(sk, portid);
859 	if (err == -EADDRINUSE)
860 		goto retry;
861 
862 	/* If 2 threads race to autobind, that is fine.  */
863 	if (err == -EBUSY)
864 		err = 0;
865 
866 	return err;
867 }
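/* Because the portid chosen here is not predictable under collisions,
 * userspace should read the assigned value back rather than assume
 * getpid(). A minimal sketch:
 *
 *	struct sockaddr_nl sa;
 *	socklen_t salen = sizeof(sa);
 *
 *	getsockname(fd, (struct sockaddr *)&sa, &salen);
 *	(sa.nl_pid now holds the portid netlink_autobind() picked)
 */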
868 
869 /**
870  * __netlink_ns_capable - General netlink message capability test
871  * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
872  * @user_ns: The user namespace of the capability to use
873  * @cap: The capability to use
874  *
875  * Test to see if the opener of the socket we received the message
876  * from had the capability @cap in the user namespace @user_ns when
877  * the socket was created, and that the sender of the message has it too.
878  */
879 bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
880 			struct user_namespace *user_ns, int cap)
881 {
882 	return ((nsp->flags & NETLINK_SKB_DST) ||
883 		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
884 		ns_capable(user_ns, cap);
885 }
886 EXPORT_SYMBOL(__netlink_ns_capable);
887 
888 /**
889  * netlink_ns_capable - General netlink message capability test
890  * @skb: socket buffer holding a netlink command from userspace
891  * @user_ns: The user namespace of the capability to use
892  * @cap: The capability to use
893  *
894  * Test to see if the opener of the socket we received the message
895  * from had the capability @cap in the user namespace @user_ns when
896  * the socket was created, and that the sender of the message has it too.
897  */
898 bool netlink_ns_capable(const struct sk_buff *skb,
899 			struct user_namespace *user_ns, int cap)
900 {
901 	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
902 }
903 EXPORT_SYMBOL(netlink_ns_capable);
904 
905 /**
906  * netlink_capable - Netlink global message capability test
907  * @skb: socket buffer holding a netlink command from userspace
908  * @cap: The capability to use
909  *
910  * Test to see if the opener of the socket we received the message
911  * from had the capability @cap in all user namespaces when the socket
912  * was created, and that the sender of the message has it too.
913  */
914 bool netlink_capable(const struct sk_buff *skb, int cap)
915 {
916 	return netlink_ns_capable(skb, &init_user_ns, cap);
917 }
918 EXPORT_SYMBOL(netlink_capable);
919 
920 /**
921  * netlink_net_capable - Netlink network namespace message capability test
922  * @skb: socket buffer holding a netlink command from userspace
923  * @cap: The capability to use
924  *
925  * Test to see if the opener of the socket we received the message
926  * from had the capability @cap over the network namespace of the
927  * socket we received the message from when the socket was created,
928  * and that the sender of the message has it too.
929  */
930 bool netlink_net_capable(const struct sk_buff *skb, int cap)
931 {
932 	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
933 }
934 EXPORT_SYMBOL(netlink_net_capable);
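/* Typical use in a message handler, as in net/core/rtnetlink.c: reject a
 * request unless the sender has CAP_NET_ADMIN over the socket's network
 * namespace. "doit" is a hypothetical handler:
 *
 *	static int doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		...
 *	}
 */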
935 
936 static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
937 {
938 	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
939 		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
940 }
941 
942 static void
943 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
944 {
945 	struct netlink_sock *nlk = nlk_sk(sk);
946 
947 	if (nlk->subscriptions && !subscriptions)
948 		__sk_del_bind_node(sk);
949 	else if (!nlk->subscriptions && subscriptions)
950 		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
951 	nlk->subscriptions = subscriptions;
952 }
953 
954 static int netlink_realloc_groups(struct sock *sk)
955 {
956 	struct netlink_sock *nlk = nlk_sk(sk);
957 	unsigned int groups;
958 	unsigned long *new_groups;
959 	int err = 0;
960 
961 	netlink_table_grab();
962 
963 	groups = nl_table[sk->sk_protocol].groups;
964 	if (!nl_table[sk->sk_protocol].registered) {
965 		err = -ENOENT;
966 		goto out_unlock;
967 	}
968 
969 	if (nlk->ngroups >= groups)
970 		goto out_unlock;
971 
972 	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
973 	if (new_groups == NULL) {
974 		err = -ENOMEM;
975 		goto out_unlock;
976 	}
977 	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
978 	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
979 
980 	nlk->groups = new_groups;
981 	nlk->ngroups = groups;
982  out_unlock:
983 	netlink_table_ungrab();
984 	return err;
985 }
986 
987 static void netlink_undo_bind(int group, long unsigned int groups,
988 			      struct sock *sk)
989 {
990 	struct netlink_sock *nlk = nlk_sk(sk);
991 	int undo;
992 
993 	if (!nlk->netlink_unbind)
994 		return;
995 
996 	for (undo = 0; undo < group; undo++)
997 		if (test_bit(undo, &groups))
998 			nlk->netlink_unbind(sock_net(sk), undo + 1);
999 }
1000 
1001 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1002 			int addr_len)
1003 {
1004 	struct sock *sk = sock->sk;
1005 	struct net *net = sock_net(sk);
1006 	struct netlink_sock *nlk = nlk_sk(sk);
1007 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1008 	int err = 0;
1009 	unsigned long groups;
1010 	bool bound;
1011 
1012 	if (addr_len < sizeof(struct sockaddr_nl))
1013 		return -EINVAL;
1014 
1015 	if (nladdr->nl_family != AF_NETLINK)
1016 		return -EINVAL;
1017 	groups = nladdr->nl_groups;
1018 
1019 	/* Only the superuser is allowed to listen to multicasts */
1020 	if (groups) {
1021 		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1022 			return -EPERM;
1023 		err = netlink_realloc_groups(sk);
1024 		if (err)
1025 			return err;
1026 	}
1027 
1028 	if (nlk->ngroups < BITS_PER_LONG)
1029 		groups &= (1UL << nlk->ngroups) - 1;
1030 
1031 	/* Paired with WRITE_ONCE() in netlink_insert() */
1032 	bound = READ_ONCE(nlk->bound);
1033 	if (bound) {
1034 		/* Ensure nlk->portid is up-to-date. */
1035 		smp_rmb();
1036 
1037 		if (nladdr->nl_pid != nlk->portid)
1038 			return -EINVAL;
1039 	}
1040 
1041 	if (nlk->netlink_bind && groups) {
1042 		int group;
1043 
1044 		/* nl_groups is a u32, so cap the maximum groups we can bind */
1045 		for (group = 0; group < BITS_PER_TYPE(u32); group++) {
1046 			if (!test_bit(group, &groups))
1047 				continue;
1048 			err = nlk->netlink_bind(net, group + 1);
1049 			if (!err)
1050 				continue;
1051 			netlink_undo_bind(group, groups, sk);
1052 			return err;
1053 		}
1054 	}
1055 
1056 	/* No need for barriers here as we return to user-space without
1057 	 * using any of the bound attributes.
1058 	 */
1059 	netlink_lock_table();
1060 	if (!bound) {
1061 		err = nladdr->nl_pid ?
1062 			netlink_insert(sk, nladdr->nl_pid) :
1063 			netlink_autobind(sock);
1064 		if (err) {
1065 			netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
1066 			goto unlock;
1067 		}
1068 	}
1069 
1070 	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1071 		goto unlock;
1072 	netlink_unlock_table();
1073 
1074 	netlink_table_grab();
1075 	netlink_update_subscriptions(sk, nlk->subscriptions +
1076 					 hweight32(groups) -
1077 					 hweight32(nlk->groups[0]));
1078 	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
1079 	netlink_update_listeners(sk);
1080 	netlink_table_ungrab();
1081 
1082 	return 0;
1083 
1084 unlock:
1085 	netlink_unlock_table();
1086 	return err;
1087 }
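/* What netlink_bind() serves, seen from userspace (a minimal sketch;
 * RTMGRP_LINK is the rtnetlink link-events group):
 *
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,			(0 requests autobind)
 *		.nl_groups = RTMGRP_LINK,
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * With nl_pid == 0 the kernel picks a unique portid via
 * netlink_autobind(); joining nl_groups requires either
 * NL_CFG_F_NONROOT_RECV on the protocol or CAP_NET_ADMIN.
 */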
1088 
1089 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1090 			   int alen, int flags)
1091 {
1092 	int err = 0;
1093 	struct sock *sk = sock->sk;
1094 	struct netlink_sock *nlk = nlk_sk(sk);
1095 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1096 
1097 	if (alen < sizeof(addr->sa_family))
1098 		return -EINVAL;
1099 
1100 	if (addr->sa_family == AF_UNSPEC) {
1101 		/* paired with READ_ONCE() in netlink_getsockbyportid() */
1102 		WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
1103 		/* dst_portid and dst_group can be read locklessly */
1104 		WRITE_ONCE(nlk->dst_portid, 0);
1105 		WRITE_ONCE(nlk->dst_group, 0);
1106 		return 0;
1107 	}
1108 	if (addr->sa_family != AF_NETLINK)
1109 		return -EINVAL;
1110 
1111 	if (alen < sizeof(struct sockaddr_nl))
1112 		return -EINVAL;
1113 
1114 	if ((nladdr->nl_groups || nladdr->nl_pid) &&
1115 	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1116 		return -EPERM;
1117 
1118 	/* No need for barriers here as we return to user-space without
1119 	 * using any of the bound attributes.
1120 	 * Paired with WRITE_ONCE() in netlink_insert().
1121 	 */
1122 	if (!READ_ONCE(nlk->bound))
1123 		err = netlink_autobind(sock);
1124 
1125 	if (err == 0) {
1126 		/* paired with READ_ONCE() in netlink_getsockbyportid() */
1127 		WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
1128 		/* dst_portid and dst_group can be read locklessly */
1129 		WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
1130 		WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
1131 	}
1132 
1133 	return err;
1134 }
1135 
1136 static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1137 			   int peer)
1138 {
1139 	struct sock *sk = sock->sk;
1140 	struct netlink_sock *nlk = nlk_sk(sk);
1141 	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1142 
1143 	nladdr->nl_family = AF_NETLINK;
1144 	nladdr->nl_pad = 0;
1145 
1146 	if (peer) {
1147 		/* Paired with WRITE_ONCE() in netlink_connect() */
1148 		nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
1149 		nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
1150 	} else {
1151 		/* Paired with WRITE_ONCE() in netlink_insert() */
1152 		nladdr->nl_pid = READ_ONCE(nlk->portid);
1153 		netlink_lock_table();
1154 		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1155 		netlink_unlock_table();
1156 	}
1157 	return sizeof(*nladdr);
1158 }
1159 
1160 static int netlink_ioctl(struct socket *sock, unsigned int cmd,
1161 			 unsigned long arg)
1162 {
1163 	/* try to hand this ioctl down to the NIC drivers.
1164 	 */
1165 	return -ENOIOCTLCMD;
1166 }
1167 
1168 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1169 {
1170 	struct sock *sock;
1171 	struct netlink_sock *nlk;
1172 
1173 	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1174 	if (!sock)
1175 		return ERR_PTR(-ECONNREFUSED);
1176 
1177 	/* Don't bother queuing skb if kernel socket has no input function */
1178 	nlk = nlk_sk(sock);
1179 	/* dst_portid and sk_state can be changed in netlink_connect() */
1180 	if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
1181 	    READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
1182 		sock_put(sock);
1183 		return ERR_PTR(-ECONNREFUSED);
1184 	}
1185 	return sock;
1186 }
1187 
1188 struct sock *netlink_getsockbyfilp(struct file *filp)
1189 {
1190 	struct inode *inode = file_inode(filp);
1191 	struct sock *sock;
1192 
1193 	if (!S_ISSOCK(inode->i_mode))
1194 		return ERR_PTR(-ENOTSOCK);
1195 
1196 	sock = SOCKET_I(inode)->sk;
1197 	if (sock->sk_family != AF_NETLINK)
1198 		return ERR_PTR(-EINVAL);
1199 
1200 	sock_hold(sock);
1201 	return sock;
1202 }
1203 
1204 static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1205 					       int broadcast)
1206 {
1207 	struct sk_buff *skb;
1208 	void *data;
1209 
1210 	if (size <= NLMSG_GOODSIZE || broadcast)
1211 		return alloc_skb(size, GFP_KERNEL);
1212 
1213 	size = SKB_DATA_ALIGN(size) +
1214 	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1215 
1216 	data = vmalloc(size);
1217 	if (data == NULL)
1218 		return NULL;
1219 
1220 	skb = __build_skb(data, size);
1221 	if (skb == NULL)
1222 		vfree(data);
1223 	else
1224 		skb->destructor = netlink_skb_destructor;
1225 
1226 	return skb;
1227 }
1228 
1229 /*
1230  * Attach a skb to a netlink socket.
1231  * The caller must hold a reference to the destination socket. On error, the
1232  * reference is dropped. The skb is not sent to the destination; all
1233  * error checks are performed and memory in the queue is reserved.
1234  * Return values:
1235  * < 0: error. skb freed, reference to sock dropped.
1236  * 0: continue
1237  * 1: repeat lookup - reference dropped while waiting for socket memory.
1238  */
1239 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1240 		      long *timeo, struct sock *ssk)
1241 {
1242 	struct netlink_sock *nlk;
1243 
1244 	nlk = nlk_sk(sk);
1245 
1246 	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1247 	     test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
1248 		DECLARE_WAITQUEUE(wait, current);
1249 		if (!*timeo) {
1250 			if (!ssk || netlink_is_kernel(ssk))
1251 				netlink_overrun(sk);
1252 			sock_put(sk);
1253 			kfree_skb(skb);
1254 			return -EAGAIN;
1255 		}
1256 
1257 		__set_current_state(TASK_INTERRUPTIBLE);
1258 		add_wait_queue(&nlk->wait, &wait);
1259 
1260 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1261 		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
1262 		    !sock_flag(sk, SOCK_DEAD))
1263 			*timeo = schedule_timeout(*timeo);
1264 
1265 		__set_current_state(TASK_RUNNING);
1266 		remove_wait_queue(&nlk->wait, &wait);
1267 		sock_put(sk);
1268 
1269 		if (signal_pending(current)) {
1270 			kfree_skb(skb);
1271 			return sock_intr_errno(*timeo);
1272 		}
1273 		return 1;
1274 	}
1275 	netlink_skb_set_owner_r(skb, sk);
1276 	return 0;
1277 }
1278 
1279 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1280 {
1281 	int len = skb->len;
1282 
1283 	netlink_deliver_tap(sock_net(sk), skb);
1284 
1285 	skb_queue_tail(&sk->sk_receive_queue, skb);
1286 	sk->sk_data_ready(sk);
1287 	return len;
1288 }
1289 
1290 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1291 {
1292 	int len = __netlink_sendskb(sk, skb);
1293 
1294 	sock_put(sk);
1295 	return len;
1296 }
1297 
1298 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1299 {
1300 	kfree_skb(skb);
1301 	sock_put(sk);
1302 }
1303 
1304 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1305 {
1306 	int delta;
1307 
1308 	WARN_ON(skb->sk != NULL);
1309 	delta = skb->end - skb->tail;
1310 	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1311 		return skb;
1312 
1313 	if (skb_shared(skb)) {
1314 		struct sk_buff *nskb = skb_clone(skb, allocation);
1315 		if (!nskb)
1316 			return skb;
1317 		consume_skb(skb);
1318 		skb = nskb;
1319 	}
1320 
1321 	pskb_expand_head(skb, 0, -delta,
1322 			 (allocation & ~__GFP_DIRECT_RECLAIM) |
1323 			 __GFP_NOWARN | __GFP_NORETRY);
1324 	return skb;
1325 }
1326 
1327 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1328 				  struct sock *ssk)
1329 {
1330 	int ret;
1331 	struct netlink_sock *nlk = nlk_sk(sk);
1332 
1333 	ret = -ECONNREFUSED;
1334 	if (nlk->netlink_rcv != NULL) {
1335 		ret = skb->len;
1336 		netlink_skb_set_owner_r(skb, sk);
1337 		NETLINK_CB(skb).sk = ssk;
1338 		netlink_deliver_tap_kernel(sk, ssk, skb);
1339 		nlk->netlink_rcv(skb);
1340 		consume_skb(skb);
1341 	} else {
1342 		kfree_skb(skb);
1343 	}
1344 	sock_put(sk);
1345 	return ret;
1346 }
1347 
1348 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1349 		    u32 portid, int nonblock)
1350 {
1351 	struct sock *sk;
1352 	int err;
1353 	long timeo;
1354 
1355 	skb = netlink_trim(skb, gfp_any());
1356 
1357 	timeo = sock_sndtimeo(ssk, nonblock);
1358 retry:
1359 	sk = netlink_getsockbyportid(ssk, portid);
1360 	if (IS_ERR(sk)) {
1361 		kfree_skb(skb);
1362 		return PTR_ERR(sk);
1363 	}
1364 	if (netlink_is_kernel(sk))
1365 		return netlink_unicast_kernel(sk, skb, ssk);
1366 
1367 	if (sk_filter(sk, skb)) {
1368 		err = skb->len;
1369 		kfree_skb(skb);
1370 		sock_put(sk);
1371 		return err;
1372 	}
1373 
1374 	err = netlink_attachskb(sk, skb, &timeo, ssk);
1375 	if (err == 1)
1376 		goto retry;
1377 	if (err)
1378 		return err;
1379 
1380 	return netlink_sendskb(sk, skb);
1381 }
1382 EXPORT_SYMBOL(netlink_unicast);
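/* A kernel-side unicast sketch: reply to the portid recorded in the
 * request's control block. "nl_sk", "req_nlh", "MY_MSG_TYPE" and
 * "payload_len" are hypothetical, and the payload itself is left out:
 *
 *	struct sk_buff *reply = nlmsg_new(payload_len, GFP_KERNEL);
 *	struct nlmsghdr *nlh;
 *
 *	if (!reply)
 *		return -ENOMEM;
 *	nlh = nlmsg_put(reply, 0, req_nlh->nlmsg_seq, MY_MSG_TYPE,
 *			payload_len, 0);
 *	...
 *	netlink_unicast(nl_sk, reply, NETLINK_CB(skb).portid, MSG_DONTWAIT);
 *
 * netlink_unicast() consumes the skb on both success and failure, so the
 * caller must not free it afterwards.
 */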
1383 
1384 int netlink_has_listeners(struct sock *sk, unsigned int group)
1385 {
1386 	int res = 0;
1387 	struct listeners *listeners;
1388 
1389 	BUG_ON(!netlink_is_kernel(sk));
1390 
1391 	rcu_read_lock();
1392 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1393 
1394 	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1395 		res = test_bit(group - 1, listeners->masks);
1396 
1397 	rcu_read_unlock();
1398 
1399 	return res;
1400 }
1401 EXPORT_SYMBOL_GPL(netlink_has_listeners);
1402 
1403 bool netlink_strict_get_check(struct sk_buff *skb)
1404 {
1405 	const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);
1406 
1407 	return nlk->flags & NETLINK_F_STRICT_CHK;
1408 }
1409 EXPORT_SYMBOL_GPL(netlink_strict_get_check);
1410 
1411 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1412 {
1413 	struct netlink_sock *nlk = nlk_sk(sk);
1414 
1415 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1416 	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
1417 		netlink_skb_set_owner_r(skb, sk);
1418 		__netlink_sendskb(sk, skb);
1419 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1420 	}
1421 	return -1;
1422 }
1423 
1424 struct netlink_broadcast_data {
1425 	struct sock *exclude_sk;
1426 	struct net *net;
1427 	u32 portid;
1428 	u32 group;
1429 	int failure;
1430 	int delivery_failure;
1431 	int congested;
1432 	int delivered;
1433 	gfp_t allocation;
1434 	struct sk_buff *skb, *skb2;
1435 };
1436 
1437 static void do_one_broadcast(struct sock *sk,
1438 				    struct netlink_broadcast_data *p)
1439 {
1440 	struct netlink_sock *nlk = nlk_sk(sk);
1441 	int val;
1442 
1443 	if (p->exclude_sk == sk)
1444 		return;
1445 
1446 	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1447 	    !test_bit(p->group - 1, nlk->groups))
1448 		return;
1449 
1450 	if (!net_eq(sock_net(sk), p->net)) {
1451 		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
1452 			return;
1453 
1454 		if (!peernet_has_id(sock_net(sk), p->net))
1455 			return;
1456 
1457 		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
1458 				     CAP_NET_BROADCAST))
1459 			return;
1460 	}
1461 
1462 	if (p->failure) {
1463 		netlink_overrun(sk);
1464 		return;
1465 	}
1466 
1467 	sock_hold(sk);
1468 	if (p->skb2 == NULL) {
1469 		if (skb_shared(p->skb)) {
1470 			p->skb2 = skb_clone(p->skb, p->allocation);
1471 		} else {
1472 			p->skb2 = skb_get(p->skb);
1473 			/*
1474 			 * skb ownership may have been set when
1475 			 * delivered to a previous socket.
1476 			 */
1477 			skb_orphan(p->skb2);
1478 		}
1479 	}
1480 	if (p->skb2 == NULL) {
1481 		netlink_overrun(sk);
1482 		/* Clone failed. Notify ALL listeners. */
1483 		p->failure = 1;
1484 		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1485 			p->delivery_failure = 1;
1486 		goto out;
1487 	}
1488 	if (sk_filter(sk, p->skb2)) {
1489 		kfree_skb(p->skb2);
1490 		p->skb2 = NULL;
1491 		goto out;
1492 	}
1493 	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
1494 	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
1495 		NETLINK_CB(p->skb2).nsid_is_set = true;
1496 	val = netlink_broadcast_deliver(sk, p->skb2);
1497 	if (val < 0) {
1498 		netlink_overrun(sk);
1499 		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1500 			p->delivery_failure = 1;
1501 	} else {
1502 		p->congested |= val;
1503 		p->delivered = 1;
1504 		p->skb2 = NULL;
1505 	}
1506 out:
1507 	sock_put(sk);
1508 }
1509 
1510 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
1511 		      u32 group, gfp_t allocation)
1512 {
1513 	struct net *net = sock_net(ssk);
1514 	struct netlink_broadcast_data info;
1515 	struct sock *sk;
1516 
1517 	skb = netlink_trim(skb, allocation);
1518 
1519 	info.exclude_sk = ssk;
1520 	info.net = net;
1521 	info.portid = portid;
1522 	info.group = group;
1523 	info.failure = 0;
1524 	info.delivery_failure = 0;
1525 	info.congested = 0;
1526 	info.delivered = 0;
1527 	info.allocation = allocation;
1528 	info.skb = skb;
1529 	info.skb2 = NULL;
1530 
1531 	/* While we sleep in clone, do not allow the socket list to change */
1532 
1533 	netlink_lock_table();
1534 
1535 	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1536 		do_one_broadcast(sk, &info);
1537 
1538 	consume_skb(skb);
1539 
1540 	netlink_unlock_table();
1541 
1542 	if (info.delivery_failure) {
1543 		kfree_skb(info.skb2);
1544 		return -ENOBUFS;
1545 	}
1546 	consume_skb(info.skb2);
1547 
1548 	if (info.delivered) {
1549 		if (info.congested && gfpflags_allow_blocking(allocation))
1550 			yield();
1551 		return 0;
1552 	}
1553 	return -ESRCH;
1554 }
1555 EXPORT_SYMBOL(netlink_broadcast);
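/* A broadcast sketch, with the common netlink_has_listeners() guard used
 * to avoid building a message nobody will receive ("nl_sk" and "grp" are
 * hypothetical; grp is a 1-based group number):
 *
 *	if (!netlink_has_listeners(nl_sk, grp))
 *		return 0;
 *	skb = nlmsg_new(payload_len, GFP_KERNEL);
 *	...
 *	netlink_broadcast(nl_sk, skb, 0, grp, GFP_KERNEL);
 *
 * A return value of -ESRCH just means there was no current listener;
 * most callers treat it as success.
 */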
1556 
1557 struct netlink_set_err_data {
1558 	struct sock *exclude_sk;
1559 	u32 portid;
1560 	u32 group;
1561 	int code;
1562 };
1563 
1564 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1565 {
1566 	struct netlink_sock *nlk = nlk_sk(sk);
1567 	int ret = 0;
1568 
1569 	if (sk == p->exclude_sk)
1570 		goto out;
1571 
1572 	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1573 		goto out;
1574 
1575 	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1576 	    !test_bit(p->group - 1, nlk->groups))
1577 		goto out;
1578 
1579 	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
1580 		ret = 1;
1581 		goto out;
1582 	}
1583 
1584 	sk->sk_err = p->code;
1585 	sk_error_report(sk);
1586 out:
1587 	return ret;
1588 }
1589 
1590 /**
1591  * netlink_set_err - report error to broadcast listeners
1592  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1593  * @portid: the PORTID of a process that we want to skip (if any)
1594  * @group: the broadcast group that will notice the error
1595  * @code: error code, must be negative (as usual in kernelspace)
1596  *
1597  * This function returns the number of broadcast listeners that have set the
1598  * NETLINK_NO_ENOBUFS socket option.
1599  */
1600 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1601 {
1602 	struct netlink_set_err_data info;
1603 	struct sock *sk;
1604 	int ret = 0;
1605 
1606 	info.exclude_sk = ssk;
1607 	info.portid = portid;
1608 	info.group = group;
1609 	/* sk->sk_err wants a positive error value */
1610 	info.code = -code;
1611 
1612 	read_lock(&nl_table_lock);
1613 
1614 	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1615 		ret += do_one_set_err(sk, &info);
1616 
1617 	read_unlock(&nl_table_lock);
1618 	return ret;
1619 }
1620 EXPORT_SYMBOL(netlink_set_err);
1621 
1622 /* must be called with netlink table grabbed */
1623 static void netlink_update_socket_mc(struct netlink_sock *nlk,
1624 				     unsigned int group,
1625 				     int is_new)
1626 {
1627 	int old, new = !!is_new, subscriptions;
1628 
1629 	old = test_bit(group - 1, nlk->groups);
1630 	subscriptions = nlk->subscriptions - old + new;
1631 	if (new)
1632 		__set_bit(group - 1, nlk->groups);
1633 	else
1634 		__clear_bit(group - 1, nlk->groups);
1635 	netlink_update_subscriptions(&nlk->sk, subscriptions);
1636 	netlink_update_listeners(&nlk->sk);
1637 }
1638 
1639 static int netlink_setsockopt(struct socket *sock, int level, int optname,
1640 			      sockptr_t optval, unsigned int optlen)
1641 {
1642 	struct sock *sk = sock->sk;
1643 	struct netlink_sock *nlk = nlk_sk(sk);
1644 	unsigned int val = 0;
1645 	int err;
1646 
1647 	if (level != SOL_NETLINK)
1648 		return -ENOPROTOOPT;
1649 
1650 	if (optlen >= sizeof(int) &&
1651 	    copy_from_sockptr(&val, optval, sizeof(val)))
1652 		return -EFAULT;
1653 
1654 	switch (optname) {
1655 	case NETLINK_PKTINFO:
1656 		if (val)
1657 			nlk->flags |= NETLINK_F_RECV_PKTINFO;
1658 		else
1659 			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
1660 		err = 0;
1661 		break;
1662 	case NETLINK_ADD_MEMBERSHIP:
1663 	case NETLINK_DROP_MEMBERSHIP: {
1664 		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1665 			return -EPERM;
1666 		err = netlink_realloc_groups(sk);
1667 		if (err)
1668 			return err;
1669 		if (!val || val - 1 >= nlk->ngroups)
1670 			return -EINVAL;
1671 		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
1672 			err = nlk->netlink_bind(sock_net(sk), val);
1673 			if (err)
1674 				return err;
1675 		}
1676 		netlink_table_grab();
1677 		netlink_update_socket_mc(nlk, val,
1678 					 optname == NETLINK_ADD_MEMBERSHIP);
1679 		netlink_table_ungrab();
1680 		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
1681 			nlk->netlink_unbind(sock_net(sk), val);
1682 
1683 		err = 0;
1684 		break;
1685 	}
1686 	case NETLINK_BROADCAST_ERROR:
1687 		if (val)
1688 			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
1689 		else
1690 			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
1691 		err = 0;
1692 		break;
1693 	case NETLINK_NO_ENOBUFS:
1694 		if (val) {
1695 			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
1696 			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
1697 			wake_up_interruptible(&nlk->wait);
1698 		} else {
1699 			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
1700 		}
1701 		err = 0;
1702 		break;
1703 	case NETLINK_LISTEN_ALL_NSID:
1704 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
1705 			return -EPERM;
1706 
1707 		if (val)
1708 			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
1709 		else
1710 			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
1711 		err = 0;
1712 		break;
1713 	case NETLINK_CAP_ACK:
1714 		if (val)
1715 			nlk->flags |= NETLINK_F_CAP_ACK;
1716 		else
1717 			nlk->flags &= ~NETLINK_F_CAP_ACK;
1718 		err = 0;
1719 		break;
1720 	case NETLINK_EXT_ACK:
1721 		if (val)
1722 			nlk->flags |= NETLINK_F_EXT_ACK;
1723 		else
1724 			nlk->flags &= ~NETLINK_F_EXT_ACK;
1725 		err = 0;
1726 		break;
1727 	case NETLINK_GET_STRICT_CHK:
1728 		if (val)
1729 			nlk->flags |= NETLINK_F_STRICT_CHK;
1730 		else
1731 			nlk->flags &= ~NETLINK_F_STRICT_CHK;
1732 		err = 0;
1733 		break;
1734 	default:
1735 		err = -ENOPROTOOPT;
1736 	}
1737 	return err;
1738 }
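/* Userspace view of the membership options above (a minimal sketch;
 * "grp" must be a valid 1-based group for the protocol):
 *
 *	unsigned int grp = ...;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 *
 * Unlike sockaddr_nl.nl_groups, which is a 32-bit mask, this path takes
 * a group *number* and therefore reaches groups above 32.
 */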
1739 
1740 static int netlink_getsockopt(struct socket *sock, int level, int optname,
1741 			      char __user *optval, int __user *optlen)
1742 {
1743 	struct sock *sk = sock->sk;
1744 	struct netlink_sock *nlk = nlk_sk(sk);
1745 	unsigned int flag;
1746 	int len, val;
1747 
1748 	if (level != SOL_NETLINK)
1749 		return -ENOPROTOOPT;
1750 
1751 	if (get_user(len, optlen))
1752 		return -EFAULT;
1753 	if (len < 0)
1754 		return -EINVAL;
1755 
1756 	switch (optname) {
1757 	case NETLINK_PKTINFO:
1758 		flag = NETLINK_F_RECV_PKTINFO;
1759 		break;
1760 	case NETLINK_BROADCAST_ERROR:
1761 		flag = NETLINK_F_BROADCAST_SEND_ERROR;
1762 		break;
1763 	case NETLINK_NO_ENOBUFS:
1764 		flag = NETLINK_F_RECV_NO_ENOBUFS;
1765 		break;
1766 	case NETLINK_LIST_MEMBERSHIPS: {
1767 		int pos, idx, shift, err = 0;
1768 
1769 		netlink_lock_table();
1770 		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
1771 			if (len - pos < sizeof(u32))
1772 				break;
1773 
1774 			idx = pos / sizeof(unsigned long);
1775 			shift = (pos % sizeof(unsigned long)) * 8;
1776 			if (put_user((u32)(nlk->groups[idx] >> shift),
1777 				     (u32 __user *)(optval + pos))) {
1778 				err = -EFAULT;
1779 				break;
1780 			}
1781 		}
1782 		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
1783 			err = -EFAULT;
1784 		netlink_unlock_table();
1785 		return err;
1786 	}
1787 	case NETLINK_CAP_ACK:
1788 		flag = NETLINK_F_CAP_ACK;
1789 		break;
1790 	case NETLINK_EXT_ACK:
1791 		flag = NETLINK_F_EXT_ACK;
1792 		break;
1793 	case NETLINK_GET_STRICT_CHK:
1794 		flag = NETLINK_F_STRICT_CHK;
1795 		break;
1796 	default:
1797 		return -ENOPROTOOPT;
1798 	}
1799 
1800 	if (len < sizeof(int))
1801 		return -EINVAL;
1802 
1803 	len = sizeof(int);
1804 	val = nlk->flags & flag ? 1 : 0;
1805 
1806 	if (put_user(len, optlen) ||
1807 	    copy_to_user(optval, &val, len))
1808 		return -EFAULT;
1809 
1810 	return 0;
1811 }
1812 
1813 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1814 {
1815 	struct nl_pktinfo info;
1816 
1817 	info.group = NETLINK_CB(skb).dst_group;
1818 	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1819 }
1820 
1821 static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
1822 					 struct sk_buff *skb)
1823 {
1824 	if (!NETLINK_CB(skb).nsid_is_set)
1825 		return;
1826 
1827 	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
1828 		 &NETLINK_CB(skb).nsid);
1829 }
1830 
1831 static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1832 {
1833 	struct sock *sk = sock->sk;
1834 	struct netlink_sock *nlk = nlk_sk(sk);
1835 	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1836 	u32 dst_portid;
1837 	u32 dst_group;
1838 	struct sk_buff *skb;
1839 	int err;
1840 	struct scm_cookie scm;
1841 	u32 netlink_skb_flags = 0;
1842 
1843 	if (msg->msg_flags & MSG_OOB)
1844 		return -EOPNOTSUPP;
1845 
1846 	if (len == 0) {
1847 		pr_warn_once("Zero length message leads to an empty skb\n");
1848 		return -ENODATA;
1849 	}
1850 
1851 	err = scm_send(sock, msg, &scm, true);
1852 	if (err < 0)
1853 		return err;
1854 
1855 	if (msg->msg_namelen) {
1856 		err = -EINVAL;
1857 		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
1858 			goto out;
1859 		if (addr->nl_family != AF_NETLINK)
1860 			goto out;
1861 		dst_portid = addr->nl_pid;
1862 		dst_group = ffs(addr->nl_groups);
1863 		err =  -EPERM;
1864 		if ((dst_group || dst_portid) &&
1865 		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1866 			goto out;
1867 		netlink_skb_flags |= NETLINK_SKB_DST;
1868 	} else {
1869 		/* Paired with WRITE_ONCE() in netlink_connect() */
1870 		dst_portid = READ_ONCE(nlk->dst_portid);
1871 		dst_group = READ_ONCE(nlk->dst_group);
1872 	}
1873 
1874 	/* Paired with WRITE_ONCE() in netlink_insert() */
1875 	if (!READ_ONCE(nlk->bound)) {
1876 		err = netlink_autobind(sock);
1877 		if (err)
1878 			goto out;
1879 	} else {
1880 		/* Ensure nlk is hashed and visible. */
1881 		smp_rmb();
1882 	}
1883 
1884 	err = -EMSGSIZE;
1885 	if (len > sk->sk_sndbuf - 32)
1886 		goto out;
1887 	err = -ENOBUFS;
1888 	skb = netlink_alloc_large_skb(len, dst_group);
1889 	if (skb == NULL)
1890 		goto out;
1891 
1892 	NETLINK_CB(skb).portid	= nlk->portid;
1893 	NETLINK_CB(skb).dst_group = dst_group;
1894 	NETLINK_CB(skb).creds	= scm.creds;
1895 	NETLINK_CB(skb).flags	= netlink_skb_flags;
1896 
1897 	err = -EFAULT;
1898 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1899 		kfree_skb(skb);
1900 		goto out;
1901 	}
1902 
1903 	err = security_netlink_send(sk, skb);
1904 	if (err) {
1905 		kfree_skb(skb);
1906 		goto out;
1907 	}
1908 
1909 	if (dst_group) {
1910 		refcount_inc(&skb->users);
1911 		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1912 	}
1913 	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT);
1914 
1915 out:
1916 	scm_destroy(&scm);
1917 	return err;
1918 }
1919 
1920 static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1921 			   int flags)
1922 {
1923 	struct scm_cookie scm;
1924 	struct sock *sk = sock->sk;
1925 	struct netlink_sock *nlk = nlk_sk(sk);
1926 	size_t copied, max_recvmsg_len;
1927 	struct sk_buff *skb, *data_skb;
1928 	int err, ret;
1929 
1930 	if (flags & MSG_OOB)
1931 		return -EOPNOTSUPP;
1932 
1933 	copied = 0;
1934 
1935 	skb = skb_recv_datagram(sk, flags, &err);
1936 	if (skb == NULL)
1937 		goto out;
1938 
1939 	data_skb = skb;
1940 
1941 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1942 	if (unlikely(skb_shinfo(skb)->frag_list)) {
1943 		/*
1944 		 * If this skb has a frag_list, then here that means that we
1945 		 * will have to use the frag_list skb's data for compat tasks
1946 		 * and the regular skb's data for normal (non-compat) tasks.
1947 		 *
1948 		 * If we need to send the compat skb, assign it to the
1949 		 * 'data_skb' variable so that it will be used below for data
1950 		 * copying. We keep 'skb' for everything else, including
1951 		 * freeing both later.
1952 		 */
1953 		if (flags & MSG_CMSG_COMPAT)
1954 			data_skb = skb_shinfo(skb)->frag_list;
1955 	}
1956 #endif
1957 
1958 	/* Record the max length of recvmsg() calls for future allocations */
1959 	max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
1960 	max_recvmsg_len = min_t(size_t, max_recvmsg_len,
1961 				SKB_WITH_OVERHEAD(32768));
1962 	WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
1963 
1964 	copied = data_skb->len;
1965 	if (len < copied) {
1966 		msg->msg_flags |= MSG_TRUNC;
1967 		copied = len;
1968 	}
1969 
1970 	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
1971 
1972 	if (msg->msg_name) {
1973 		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1974 		addr->nl_family = AF_NETLINK;
1975 		addr->nl_pad    = 0;
1976 		addr->nl_pid	= NETLINK_CB(skb).portid;
1977 		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
1978 		msg->msg_namelen = sizeof(*addr);
1979 	}
1980 
1981 	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
1982 		netlink_cmsg_recv_pktinfo(msg, skb);
1983 	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
1984 		netlink_cmsg_listen_all_nsid(sk, msg, skb);
1985 
1986 	memset(&scm, 0, sizeof(scm));
1987 	scm.creds = *NETLINK_CREDS(skb);
1988 	if (flags & MSG_TRUNC)
1989 		copied = data_skb->len;
1990 
1991 	skb_free_datagram(sk, skb);
1992 
1993 	if (nlk->cb_running &&
1994 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1995 		ret = netlink_dump(sk);
1996 		if (ret) {
1997 			sk->sk_err = -ret;
1998 			sk_error_report(sk);
1999 		}
2000 	}
2001 
2002 	scm_recv(sock, msg, &scm, flags);
2003 out:
2004 	netlink_rcv_wake(sk);
2005 	return err ? : copied;
2006 }
2007 
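/* A matching userspace sketch (not part of this file) of the receive
 * side.  The sender's address is filled in from NETLINK_CB(skb) exactly
 * as done in netlink_recvmsg() above; passing MSG_TRUNC in flags makes
 * the call return the full message length even when the buffer was too
 * small, and reading with a large buffer keeps nlk->max_recvmsg_len (and
 * hence later dump allocations) at its 32KiB cap.
 *
 *	struct sockaddr_nl sa;
 *	socklen_t salen = sizeof(sa);
 *	char buf[32768];
 *	ssize_t n;
 *
 *	n = recvfrom(fd, buf, sizeof(buf), 0,
 *		     (struct sockaddr *)&sa, &salen);
 */
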
2008 static void netlink_data_ready(struct sock *sk)
2009 {
2010 	BUG();
2011 }
2012 
2013 /*
2014  *	We export these functions to other modules. They provide a
2015  *	complete set of non-blocking kernel support for message
2016  *	queueing.
2017  */
2018 
2019 struct sock *
2020 __netlink_kernel_create(struct net *net, int unit, struct module *module,
2021 			struct netlink_kernel_cfg *cfg)
2022 {
2023 	struct socket *sock;
2024 	struct sock *sk;
2025 	struct netlink_sock *nlk;
2026 	struct listeners *listeners = NULL;
2027 	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2028 	unsigned int groups;
2029 
2030 	BUG_ON(!nl_table);
2031 
2032 	if (unit < 0 || unit >= MAX_LINKS)
2033 		return NULL;
2034 
2035 	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2036 		return NULL;
2037 
2038 	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
2039 		goto out_sock_release_nosk;
2040 
2041 	sk = sock->sk;
2042 
2043 	if (!cfg || cfg->groups < 32)
2044 		groups = 32;
2045 	else
2046 		groups = cfg->groups;
2047 
2048 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2049 	if (!listeners)
2050 		goto out_sock_release;
2051 
2052 	sk->sk_data_ready = netlink_data_ready;
2053 	if (cfg && cfg->input)
2054 		nlk_sk(sk)->netlink_rcv = cfg->input;
2055 
2056 	if (netlink_insert(sk, 0))
2057 		goto out_sock_release;
2058 
2059 	nlk = nlk_sk(sk);
2060 	nlk->flags |= NETLINK_F_KERNEL_SOCKET;
2061 
2062 	netlink_table_grab();
2063 	if (!nl_table[unit].registered) {
2064 		nl_table[unit].groups = groups;
2065 		rcu_assign_pointer(nl_table[unit].listeners, listeners);
2066 		nl_table[unit].cb_mutex = cb_mutex;
2067 		nl_table[unit].module = module;
2068 		if (cfg) {
2069 			nl_table[unit].bind = cfg->bind;
2070 			nl_table[unit].unbind = cfg->unbind;
2071 			nl_table[unit].flags = cfg->flags;
2072 		}
2073 		nl_table[unit].registered = 1;
2074 	} else {
2075 		kfree(listeners);
2076 		nl_table[unit].registered++;
2077 	}
2078 	netlink_table_ungrab();
2079 	return sk;
2080 
2081 out_sock_release:
2082 	kfree(listeners);
2083 	netlink_kernel_release(sk);
2084 	return NULL;
2085 
2086 out_sock_release_nosk:
2087 	sock_release(sock);
2088 	return NULL;
2089 }
2090 EXPORT_SYMBOL(__netlink_kernel_create);
2091 
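/* A minimal sketch of a kernel-side user, via the netlink_kernel_create()
 * wrapper from <linux/netlink.h> (which supplies THIS_MODULE here).  The
 * NETLINK_DEMO unit and the demo_* names are hypothetical; the unit must
 * be an otherwise unused protocol number below MAX_LINKS.
 *
 *	#define NETLINK_DEMO 30
 *
 *	static struct sock *demo_sk;
 *
 *	static void demo_input(struct sk_buff *skb)
 *	{
 *		pr_info("netlink demo: received %u bytes\n", skb->len);
 *	}
 *
 *	static int __init demo_init(void)
 *	{
 *		struct netlink_kernel_cfg cfg = {
 *			.input = demo_input,
 *		};
 *
 *		demo_sk = netlink_kernel_create(&init_net, NETLINK_DEMO, &cfg);
 *		return demo_sk ? 0 : -ENOMEM;
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		netlink_kernel_release(demo_sk);
 *	}
 */
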
2092 void
2093 netlink_kernel_release(struct sock *sk)
2094 {
2095 	if (sk == NULL || sk->sk_socket == NULL)
2096 		return;
2097 
2098 	sock_release(sk->sk_socket);
2099 }
2100 EXPORT_SYMBOL(netlink_kernel_release);
2101 
2102 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2103 {
2104 	struct listeners *new, *old;
2105 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2106 
2107 	if (groups < 32)
2108 		groups = 32;
2109 
2110 	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2111 		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2112 		if (!new)
2113 			return -ENOMEM;
2114 		old = nl_deref_protected(tbl->listeners);
2115 		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2116 		rcu_assign_pointer(tbl->listeners, new);
2117 
2118 		kfree_rcu(old, rcu);
2119 	}
2120 	tbl->groups = groups;
2121 
2122 	return 0;
2123 }
2124 
2125 /**
2126  * netlink_change_ngroups - change number of multicast groups
2127  * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2128  * @groups: The new number of groups.
2129  *
2130  * This changes the number of multicast groups that are available
2131  * on a certain netlink family. Note that it is not possible to
2132  * change the number of groups to below 32. Also note that it does
2133  * not implicitly call netlink_clear_multicast_users() when the
2134  * number of groups is reduced.
2135  */
2137 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2138 {
2139 	int err;
2140 
2141 	netlink_table_grab();
2142 	err = __netlink_change_ngroups(sk, groups);
2143 	netlink_table_ungrab();
2144 
2145 	return err;
2146 }
2147 
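/* A minimal sketch (sizes hypothetical): grow a family's multicast group
 * space after registration, e.g. as the generic netlink code does when a
 * new family needs more groups.  Shrinking below 32 is clamped above.
 *
 *	err = netlink_change_ngroups(demo_sk, 64);
 */
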
2148 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2149 {
2150 	struct sock *sk;
2151 	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2152 
2153 	sk_for_each_bound(sk, &tbl->mc_list)
2154 		netlink_update_socket_mc(nlk_sk(sk), group, 0);
2155 }
2156 
2157 struct nlmsghdr *
2158 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2159 {
2160 	struct nlmsghdr *nlh;
2161 	int size = nlmsg_msg_size(len);
2162 
2163 	nlh = skb_put(skb, NLMSG_ALIGN(size));
2164 	nlh->nlmsg_type = type;
2165 	nlh->nlmsg_len = size;
2166 	nlh->nlmsg_flags = flags;
2167 	nlh->nlmsg_pid = portid;
2168 	nlh->nlmsg_seq = seq;
2169 	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2170 		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2171 	return nlh;
2172 }
2173 EXPORT_SYMBOL(__nlmsg_put);
2174 
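/* A minimal sketch of composing a message on top of __nlmsg_put(), using
 * the nlmsg_new()/nlmsg_put()/nlmsg_end() wrappers from <net/netlink.h>.
 * DEMO_MSG_TYPE and struct demo_msg are hypothetical; note that
 * __nlmsg_put() zeroes only the alignment padding, not the payload.
 *
 *	struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 *	struct nlmsghdr *nlh;
 *
 *	if (!skb)
 *		return -ENOBUFS;
 *	nlh = nlmsg_put(skb, portid, seq, DEMO_MSG_TYPE,
 *			sizeof(struct demo_msg), 0);
 *	if (!nlh) {
 *		nlmsg_free(skb);
 *		return -EMSGSIZE;
 *	}
 *	memset(nlmsg_data(nlh), 0, sizeof(struct demo_msg));
 *	nlmsg_end(skb, nlh);
 */
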
2175 /*
2176  * It looks a bit ugly.
2177  * It would be better to create a kernel thread.
2178  */
2179 
2180 static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
2181 			     struct netlink_callback *cb,
2182 			     struct netlink_ext_ack *extack)
2183 {
2184 	struct nlmsghdr *nlh;
2185 
2186 	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(nlk->dump_done_errno),
2187 			       NLM_F_MULTI | cb->answer_flags);
2188 	if (WARN_ON(!nlh))
2189 		return -ENOBUFS;
2190 
2191 	nl_dump_check_consistent(cb, nlh);
2192 	memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
2193 
2194 	if (extack->_msg && nlk->flags & NETLINK_F_EXT_ACK) {
2195 		nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
2196 		if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg))
2197 			nlmsg_end(skb, nlh);
2198 	}
2199 
2200 	return 0;
2201 }
2202 
2203 static int netlink_dump(struct sock *sk)
2204 {
2205 	struct netlink_sock *nlk = nlk_sk(sk);
2206 	struct netlink_ext_ack extack = {};
2207 	struct netlink_callback *cb;
2208 	struct sk_buff *skb = NULL;
2209 	size_t max_recvmsg_len;
2210 	struct module *module;
2211 	int err = -ENOBUFS;
2212 	int alloc_min_size;
2213 	int alloc_size;
2214 
2215 	mutex_lock(nlk->cb_mutex);
2216 	if (!nlk->cb_running) {
2217 		err = -EINVAL;
2218 		goto errout_skb;
2219 	}
2220 
2221 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2222 		goto errout_skb;
2223 
2224 	/* NLMSG_GOODSIZE is small to avoid high-order allocations being
2225 	 * required, but it makes sense to _attempt_ a 16K-byte allocation
2226 	 * to reduce the number of system calls on dump operations, if the
2227 	 * user ever provided a big enough buffer.
2228 	 */
2229 	cb = &nlk->cb;
2230 	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2231 
2232 	max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
2233 	if (alloc_min_size < max_recvmsg_len) {
2234 		alloc_size = max_recvmsg_len;
2235 		skb = alloc_skb(alloc_size,
2236 				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
2237 				__GFP_NOWARN | __GFP_NORETRY);
2238 	}
2239 	if (!skb) {
2240 		alloc_size = alloc_min_size;
2241 		skb = alloc_skb(alloc_size, GFP_KERNEL);
2242 	}
2243 	if (!skb)
2244 		goto errout_skb;
2245 
2246 	/* Trim skb to the allocated size. The user is expected to provide a
2247 	 * buffer as large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped
2248 	 * at netlink_recvmsg())). The dump will pack as many smaller messages
2249 	 * as fit within the allocated skb. The skb is typically allocated with
2250 	 * more space than required (up to nearly 2x the requested size, since
2251 	 * the allocator rounds up to the next power of 2). Allowing the dump
2252 	 * to use the excess space would make it difficult for a user to size a
2253 	 * reasonable static buffer based on the expected largest dump of a
2254 	 * single netdev; the outcome would be a MSG_TRUNC error.
2255 	 */
2256 	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
2257 
2258 	/* Make sure malicious BPF programs cannot read uninitialized memory
2259 	 * from skb->head -> skb->data
2260 	 */
2261 	skb_reset_network_header(skb);
2262 	skb_reset_mac_header(skb);
2263 
2264 	netlink_skb_set_owner_r(skb, sk);
2265 
2266 	if (nlk->dump_done_errno > 0) {
2267 		cb->extack = &extack;
2268 		nlk->dump_done_errno = cb->dump(skb, cb);
2269 		cb->extack = NULL;
2270 	}
2271 
2272 	if (nlk->dump_done_errno > 0 ||
2273 	    skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
2274 		mutex_unlock(nlk->cb_mutex);
2275 
2276 		if (sk_filter(sk, skb))
2277 			kfree_skb(skb);
2278 		else
2279 			__netlink_sendskb(sk, skb);
2280 		return 0;
2281 	}
2282 
2283 	if (netlink_dump_done(nlk, skb, cb, &extack))
2284 		goto errout_skb;
2285 
2286 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2287 	/* frag_list skb's data is used for compat tasks
2288 	 * and the regular skb's data for normal (non-compat) tasks.
2289 	 * See netlink_recvmsg().
2290 	 */
2291 	if (unlikely(skb_shinfo(skb)->frag_list)) {
2292 		if (netlink_dump_done(nlk, skb_shinfo(skb)->frag_list, cb, &extack))
2293 			goto errout_skb;
2294 	}
2295 #endif
2296 
2297 	if (sk_filter(sk, skb))
2298 		kfree_skb(skb);
2299 	else
2300 		__netlink_sendskb(sk, skb);
2301 
2302 	if (cb->done)
2303 		cb->done(cb);
2304 
2305 	nlk->cb_running = false;
2306 	module = cb->module;
2307 	skb = cb->skb;
2308 	mutex_unlock(nlk->cb_mutex);
2309 	module_put(module);
2310 	consume_skb(skb);
2311 	return 0;
2312 
2313 errout_skb:
2314 	mutex_unlock(nlk->cb_mutex);
2315 	kfree_skb(skb);
2316 	return err;
2317 }
2318 
2319 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2320 			 const struct nlmsghdr *nlh,
2321 			 struct netlink_dump_control *control)
2322 {
2323 	struct netlink_sock *nlk, *nlk2;
2324 	struct netlink_callback *cb;
2325 	struct sock *sk;
2326 	int ret;
2327 
2328 	refcount_inc(&skb->users);
2329 
2330 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2331 	if (sk == NULL) {
2332 		ret = -ECONNREFUSED;
2333 		goto error_free;
2334 	}
2335 
2336 	nlk = nlk_sk(sk);
2337 	mutex_lock(nlk->cb_mutex);
2338 	/* A dump is in progress... */
2339 	if (nlk->cb_running) {
2340 		ret = -EBUSY;
2341 		goto error_unlock;
2342 	}
2343 	/* add reference of module which cb->dump belongs to */
2344 	/* Take a reference on the module that cb->dump belongs to */
2345 		ret = -EPROTONOSUPPORT;
2346 		goto error_unlock;
2347 	}
2348 
2349 	cb = &nlk->cb;
2350 	memset(cb, 0, sizeof(*cb));
2351 	cb->dump = control->dump;
2352 	cb->done = control->done;
2353 	cb->nlh = nlh;
2354 	cb->data = control->data;
2355 	cb->module = control->module;
2356 	cb->min_dump_alloc = control->min_dump_alloc;
2357 	cb->skb = skb;
2358 
2359 	nlk2 = nlk_sk(NETLINK_CB(skb).sk);
2360 	cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK);
2361 
2362 	if (control->start) {
2363 		ret = control->start(cb);
2364 		if (ret)
2365 			goto error_put;
2366 	}
2367 
2368 	nlk->cb_running = true;
2369 	nlk->dump_done_errno = INT_MAX;
2370 
2371 	mutex_unlock(nlk->cb_mutex);
2372 
2373 	ret = netlink_dump(sk);
2374 
2375 	sock_put(sk);
2376 
2377 	if (ret)
2378 		return ret;
2379 
2380 	/* We successfully started a dump; by returning -EINTR we
2381 	 * signal that no ACK should be sent even if one was requested.
2382 	 */
2383 	return -EINTR;
2384 
2385 error_put:
2386 	module_put(control->module);
2387 error_unlock:
2388 	sock_put(sk);
2389 	mutex_unlock(nlk->cb_mutex);
2390 error_free:
2391 	kfree_skb(skb);
2392 	return ret;
2393 }
2394 EXPORT_SYMBOL(__netlink_dump_start);
2395 
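/* A minimal sketch of starting a dump from a request handler via the
 * netlink_dump_start() wrapper from <linux/netlink.h>.  demo_dump_doit(),
 * DEMO_MSG_TYPE and demo_sk are hypothetical; note the -EINTR convention
 * above, which tells the caller not to also send an ACK on success.
 *
 *	static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		struct nlmsghdr *nlh;
 *
 *		if (cb->args[0])
 *			return 0;	(dump finished on a previous pass)
 *		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
 *				cb->nlh->nlmsg_seq, DEMO_MSG_TYPE, 0,
 *				NLM_F_MULTI);
 *		if (!nlh)
 *			return -EMSGSIZE;
 *		nlmsg_end(skb, nlh);
 *		cb->args[0] = 1;
 *		return skb->len;
 *	}
 *
 *	static int demo_dump_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *				  struct netlink_ext_ack *extack)
 *	{
 *		struct netlink_dump_control c = { .dump = demo_dump };
 *
 *		if (nlh->nlmsg_flags & NLM_F_DUMP)
 *			return netlink_dump_start(demo_sk, skb, nlh, &c);
 *		return -EOPNOTSUPP;
 *	}
 */
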
2396 static size_t
2397 netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
2398 		    const struct netlink_ext_ack *extack)
2399 {
2400 	size_t tlvlen;
2401 
2402 	if (!extack || !(nlk->flags & NETLINK_F_EXT_ACK))
2403 		return 0;
2404 
2405 	tlvlen = 0;
2406 	if (extack->_msg)
2407 		tlvlen += nla_total_size(strlen(extack->_msg) + 1);
2408 	if (extack->cookie_len)
2409 		tlvlen += nla_total_size(extack->cookie_len);
2410 
2411 	/* The following attributes are reported only for errors (not warnings) */
2412 	if (!err)
2413 		return tlvlen;
2414 
2415 	if (extack->bad_attr)
2416 		tlvlen += nla_total_size(sizeof(u32));
2417 	if (extack->policy)
2418 		tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy);
2419 	if (extack->miss_type)
2420 		tlvlen += nla_total_size(sizeof(u32));
2421 	if (extack->miss_nest)
2422 		tlvlen += nla_total_size(sizeof(u32));
2423 
2424 	return tlvlen;
2425 }
2426 
2427 static void
2428 netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb,
2429 		     struct nlmsghdr *nlh, int err,
2430 		     const struct netlink_ext_ack *extack)
2431 {
2432 	if (extack->_msg)
2433 		WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg));
2434 	if (extack->cookie_len)
2435 		WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
2436 				extack->cookie_len, extack->cookie));
2437 
2438 	if (!err)
2439 		return;
2440 
2441 	if (extack->bad_attr &&
2442 	    !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
2443 		     (u8 *)extack->bad_attr >= in_skb->data + in_skb->len))
2444 		WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
2445 				    (u8 *)extack->bad_attr - (u8 *)nlh));
2446 	if (extack->policy)
2447 		netlink_policy_dump_write_attr(skb, extack->policy,
2448 					       NLMSGERR_ATTR_POLICY);
2449 	if (extack->miss_type)
2450 		WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_TYPE,
2451 				    extack->miss_type));
2452 	if (extack->miss_nest &&
2453 	    !WARN_ON((u8 *)extack->miss_nest < in_skb->data ||
2454 		     (u8 *)extack->miss_nest > in_skb->data + in_skb->len))
2455 		WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_NEST,
2456 				    (u8 *)extack->miss_nest - (u8 *)nlh));
2457 }
2458 
2459 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2460 		 const struct netlink_ext_ack *extack)
2461 {
2462 	struct sk_buff *skb;
2463 	struct nlmsghdr *rep;
2464 	struct nlmsgerr *errmsg;
2465 	size_t payload = sizeof(*errmsg);
2466 	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2467 	unsigned int flags = 0;
2468 	size_t tlvlen;
2469 
2470 	/* Error messages get the original request appended, unless the user
2471 	 * requests to cap the error message, and get extra error data if
2472 	 * requested.
2473 	 */
2474 	if (err && !(nlk->flags & NETLINK_F_CAP_ACK))
2475 		payload += nlmsg_len(nlh);
2476 	else
2477 		flags |= NLM_F_CAPPED;
2478 
2479 	tlvlen = netlink_ack_tlv_len(nlk, err, extack);
2480 	if (tlvlen)
2481 		flags |= NLM_F_ACK_TLVS;
2482 
2483 	skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
2484 	if (!skb)
2485 		goto err_skb;
2486 
2487 	rep = nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2488 			NLMSG_ERROR, sizeof(*errmsg), flags);
2489 	if (!rep)
2490 		goto err_bad_put;
2491 	errmsg = nlmsg_data(rep);
2492 	errmsg->error = err;
2493 	errmsg->msg = *nlh;
2494 
2495 	if (!(flags & NLM_F_CAPPED)) {
2496 		if (!nlmsg_append(skb, nlmsg_len(nlh)))
2497 			goto err_bad_put;
2498 
2499 		memcpy(nlmsg_data(&errmsg->msg), nlmsg_data(nlh),
2500 		       nlmsg_len(nlh));
2501 	}
2502 
2503 	if (tlvlen)
2504 		netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack);
2505 
2506 	nlmsg_end(skb, rep);
2507 
2508 	nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid);
2509 
2510 	return;
2511 
2512 err_bad_put:
2513 	nlmsg_free(skb);
2514 err_skb:
2515 	NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
2516 	sk_error_report(NETLINK_CB(in_skb).sk);
2517 }
2518 EXPORT_SYMBOL(netlink_ack);
2519 
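/* A minimal sketch of a request handler returning extended-ACK data,
 * which netlink_ack() above encodes as NLMSGERR_ATTR_* TLVs when the
 * requester enabled extended ACKs (setsockopt NETLINK_EXT_ACK).
 * DEMO_MIN_LEN is hypothetical.
 *
 *	static int demo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *			     struct netlink_ext_ack *extack)
 *	{
 *		if (nlmsg_len(nlh) < DEMO_MIN_LEN) {
 *			NL_SET_ERR_MSG(extack, "request payload too short");
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */
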
2520 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2521 						   struct nlmsghdr *,
2522 						   struct netlink_ext_ack *))
2523 {
2524 	struct netlink_ext_ack extack;
2525 	struct nlmsghdr *nlh;
2526 	int err;
2527 
2528 	while (skb->len >= nlmsg_total_size(0)) {
2529 		int msglen;
2530 
2531 		memset(&extack, 0, sizeof(extack));
2532 		nlh = nlmsg_hdr(skb);
2533 		err = 0;
2534 
2535 		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2536 			return 0;
2537 
2538 		/* Only requests are handled by the kernel */
2539 		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2540 			goto ack;
2541 
2542 		/* Skip control messages */
2543 		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2544 			goto ack;
2545 
2546 		err = cb(skb, nlh, &extack);
2547 		if (err == -EINTR)
2548 			goto skip;
2549 
2550 ack:
2551 		if (nlh->nlmsg_flags & NLM_F_ACK || err)
2552 			netlink_ack(skb, nlh, err, &extack);
2553 
2554 skip:
2555 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2556 		if (msglen > skb->len)
2557 			msglen = skb->len;
2558 		skb_pull(skb, msglen);
2559 	}
2560 
2561 	return 0;
2562 }
2563 EXPORT_SYMBOL(netlink_rcv_skb);
2564 
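/* A minimal sketch of the usual pairing: the kernel socket's input
 * callback hands every queued skb to netlink_rcv_skb(), which walks the
 * batched messages, dispatches requests to the per-message handler and
 * generates ACKs.  This would replace the bare pr_info() input sketched
 * after __netlink_kernel_create() above; demo_doit() is the hypothetical
 * handler from the previous sketch.
 *
 *	static void demo_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, demo_doit);
 *	}
 */
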
2565 /**
2566  * nlmsg_notify - send a notification netlink message
2567  * @sk: netlink socket to use
2568  * @skb: notification message
2569  * @portid: destination netlink portid for reports or 0
2570  * @group: destination multicast group or 0
2571  * @report: 1 to report back, 0 to disable
2572  * @flags: allocation flags
2573  */
2574 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2575 		 unsigned int group, int report, gfp_t flags)
2576 {
2577 	int err = 0;
2578 
2579 	if (group) {
2580 		int exclude_portid = 0;
2581 
2582 		if (report) {
2583 			refcount_inc(&skb->users);
2584 			exclude_portid = portid;
2585 		}
2586 
2587 		/* Errors are reported via destination sk->sk_err, but delivery
2588 		 * errors propagate only if NETLINK_BROADCAST_ERROR is set */
2589 		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2590 		if (err == -ESRCH)
2591 			err = 0;
2592 	}
2593 
2594 	if (report) {
2595 		int err2;
2596 
2597 		err2 = nlmsg_unicast(sk, skb, portid);
2598 		if (!err)
2599 			err = err2;
2600 	}
2601 
2602 	return err;
2603 }
2604 EXPORT_SYMBOL(nlmsg_notify);
2605 
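/* A minimal sketch of the notification pattern (cf. rtnl_notify()): when
 * the triggering request carried NLM_F_ECHO, nlmsg_report() is true and a
 * unicast copy goes back to the requester in addition to the multicast.
 * DEMO_GRP_EVENT and oskb (the original request skb) are hypothetical.
 *
 *	int report = nlmsg_report(nlh);
 *
 *	nlmsg_notify(demo_sk, skb, NETLINK_CB(oskb).portid,
 *		     DEMO_GRP_EVENT, report, GFP_KERNEL);
 */
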
2606 #ifdef CONFIG_PROC_FS
2607 struct nl_seq_iter {
2608 	struct seq_net_private p;
2609 	struct rhashtable_iter hti;
2610 	int link;
2611 };
2612 
2613 static void netlink_walk_start(struct nl_seq_iter *iter)
2614 {
2615 	rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti);
2616 	rhashtable_walk_start(&iter->hti);
2617 }
2618 
2619 static void netlink_walk_stop(struct nl_seq_iter *iter)
2620 {
2621 	rhashtable_walk_stop(&iter->hti);
2622 	rhashtable_walk_exit(&iter->hti);
2623 }
2624 
2625 static void *__netlink_seq_next(struct seq_file *seq)
2626 {
2627 	struct nl_seq_iter *iter = seq->private;
2628 	struct netlink_sock *nlk;
2629 
2630 	do {
2631 		for (;;) {
2632 			nlk = rhashtable_walk_next(&iter->hti);
2633 
2634 			if (IS_ERR(nlk)) {
2635 				if (PTR_ERR(nlk) == -EAGAIN)
2636 					continue;
2637 
2638 				return nlk;
2639 			}
2640 
2641 			if (nlk)
2642 				break;
2643 
2644 			netlink_walk_stop(iter);
2645 			if (++iter->link >= MAX_LINKS)
2646 				return NULL;
2647 
2648 			netlink_walk_start(iter);
2649 		}
2650 	} while (sock_net(&nlk->sk) != seq_file_net(seq));
2651 
2652 	return nlk;
2653 }
2654 
2655 static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
2656 	__acquires(RCU)
2657 {
2658 	struct nl_seq_iter *iter = seq->private;
2659 	void *obj = SEQ_START_TOKEN;
2660 	loff_t pos;
2661 
2662 	iter->link = 0;
2663 
2664 	netlink_walk_start(iter);
2665 
2666 	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
2667 		obj = __netlink_seq_next(seq);
2668 
2669 	return obj;
2670 }
2671 
2672 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2673 {
2674 	++*pos;
2675 	return __netlink_seq_next(seq);
2676 }
2677 
2678 static void netlink_native_seq_stop(struct seq_file *seq, void *v)
2679 {
2680 	struct nl_seq_iter *iter = seq->private;
2681 
2682 	if (iter->link >= MAX_LINKS)
2683 		return;
2684 
2685 	netlink_walk_stop(iter);
2686 }
2687 
2688 
2689 static int netlink_native_seq_show(struct seq_file *seq, void *v)
2690 {
2691 	if (v == SEQ_START_TOKEN) {
2692 		seq_puts(seq,
2693 			 "sk               Eth Pid        Groups   "
2694 			 "Rmem     Wmem     Dump  Locks    Drops    Inode\n");
2695 	} else {
2696 		struct sock *s = v;
2697 		struct netlink_sock *nlk = nlk_sk(s);
2698 
2699 		seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8u %-8lu\n",
2700 			   s,
2701 			   s->sk_protocol,
2702 			   nlk->portid,
2703 			   nlk->groups ? (u32)nlk->groups[0] : 0,
2704 			   sk_rmem_alloc_get(s),
2705 			   sk_wmem_alloc_get(s),
2706 			   nlk->cb_running,
2707 			   refcount_read(&s->sk_refcnt),
2708 			   atomic_read(&s->sk_drops),
2709 			   sock_i_ino(s)
2710 			);
2711 
2712 	}
2713 	return 0;
2714 }
2715 
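/* Sample /proc/net/netlink output produced by the format above (values
 * illustrative):
 *
 *	sk               Eth Pid        Groups   Rmem     Wmem     Dump  Locks    Drops    Inode
 *	0000000000000000 0   2552       000a0551 0        0        0     2        0        18233
 */
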
2716 #ifdef CONFIG_BPF_SYSCALL
2717 struct bpf_iter__netlink {
2718 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
2719 	__bpf_md_ptr(struct netlink_sock *, sk);
2720 };
2721 
2722 DEFINE_BPF_ITER_FUNC(netlink, struct bpf_iter_meta *meta, struct netlink_sock *sk)
2723 
2724 static int netlink_prog_seq_show(struct bpf_prog *prog,
2725 				  struct bpf_iter_meta *meta,
2726 				  void *v)
2727 {
2728 	struct bpf_iter__netlink ctx;
2729 
2730 	meta->seq_num--;  /* skip SEQ_START_TOKEN */
2731 	ctx.meta = meta;
2732 	ctx.sk = nlk_sk((struct sock *)v);
2733 	return bpf_iter_run_prog(prog, &ctx);
2734 }
2735 
2736 static int netlink_seq_show(struct seq_file *seq, void *v)
2737 {
2738 	struct bpf_iter_meta meta;
2739 	struct bpf_prog *prog;
2740 
2741 	meta.seq = seq;
2742 	prog = bpf_iter_get_info(&meta, false);
2743 	if (!prog)
2744 		return netlink_native_seq_show(seq, v);
2745 
2746 	if (v != SEQ_START_TOKEN)
2747 		return netlink_prog_seq_show(prog, &meta, v);
2748 
2749 	return 0;
2750 }
2751 
2752 static void netlink_seq_stop(struct seq_file *seq, void *v)
2753 {
2754 	struct bpf_iter_meta meta;
2755 	struct bpf_prog *prog;
2756 
2757 	if (!v) {
2758 		meta.seq = seq;
2759 		prog = bpf_iter_get_info(&meta, true);
2760 		if (prog)
2761 			(void)netlink_prog_seq_show(prog, &meta, v);
2762 	}
2763 
2764 	netlink_native_seq_stop(seq, v);
2765 }
2766 #else
2767 static int netlink_seq_show(struct seq_file *seq, void *v)
2768 {
2769 	return netlink_native_seq_show(seq, v);
2770 }
2771 
2772 static void netlink_seq_stop(struct seq_file *seq, void *v)
2773 {
2774 	netlink_native_seq_stop(seq, v);
2775 }
2776 #endif
2777 
2778 static const struct seq_operations netlink_seq_ops = {
2779 	.start  = netlink_seq_start,
2780 	.next   = netlink_seq_next,
2781 	.stop   = netlink_seq_stop,
2782 	.show   = netlink_seq_show,
2783 };
2784 #endif
2785 
2786 int netlink_register_notifier(struct notifier_block *nb)
2787 {
2788 	return blocking_notifier_chain_register(&netlink_chain, nb);
2789 }
2790 EXPORT_SYMBOL(netlink_register_notifier);
2791 
2792 int netlink_unregister_notifier(struct notifier_block *nb)
2793 {
2794 	return blocking_notifier_chain_unregister(&netlink_chain, nb);
2795 }
2796 EXPORT_SYMBOL(netlink_unregister_notifier);
2797 
2798 static const struct proto_ops netlink_ops = {
2799 	.family =	PF_NETLINK,
2800 	.owner =	THIS_MODULE,
2801 	.release =	netlink_release,
2802 	.bind =		netlink_bind,
2803 	.connect =	netlink_connect,
2804 	.socketpair =	sock_no_socketpair,
2805 	.accept =	sock_no_accept,
2806 	.getname =	netlink_getname,
2807 	.poll =		datagram_poll,
2808 	.ioctl =	netlink_ioctl,
2809 	.listen =	sock_no_listen,
2810 	.shutdown =	sock_no_shutdown,
2811 	.setsockopt =	netlink_setsockopt,
2812 	.getsockopt =	netlink_getsockopt,
2813 	.sendmsg =	netlink_sendmsg,
2814 	.recvmsg =	netlink_recvmsg,
2815 	.mmap =		sock_no_mmap,
2816 	.sendpage =	sock_no_sendpage,
2817 };
2818 
2819 static const struct net_proto_family netlink_family_ops = {
2820 	.family = PF_NETLINK,
2821 	.create = netlink_create,
2822 	.owner	= THIS_MODULE,	/* for consistency 8) */
2823 };
2824 
2825 static int __net_init netlink_net_init(struct net *net)
2826 {
2827 #ifdef CONFIG_PROC_FS
2828 	if (!proc_create_net("netlink", 0, net->proc_net, &netlink_seq_ops,
2829 			sizeof(struct nl_seq_iter)))
2830 		return -ENOMEM;
2831 #endif
2832 	return 0;
2833 }
2834 
2835 static void __net_exit netlink_net_exit(struct net *net)
2836 {
2837 #ifdef CONFIG_PROC_FS
2838 	remove_proc_entry("netlink", net->proc_net);
2839 #endif
2840 }
2841 
2842 static void __init netlink_add_usersock_entry(void)
2843 {
2844 	struct listeners *listeners;
2845 	int groups = 32;
2846 
2847 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2848 	if (!listeners)
2849 		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2850 
2851 	netlink_table_grab();
2852 
2853 	nl_table[NETLINK_USERSOCK].groups = groups;
2854 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2855 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2856 	nl_table[NETLINK_USERSOCK].registered = 1;
2857 	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
2858 
2859 	netlink_table_ungrab();
2860 }
2861 
2862 static struct pernet_operations __net_initdata netlink_net_ops = {
2863 	.init = netlink_net_init,
2864 	.exit = netlink_net_exit,
2865 };
2866 
2867 static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
2868 {
2869 	const struct netlink_sock *nlk = data;
2870 	struct netlink_compare_arg arg;
2871 
2872 	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
2873 	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
2874 }
2875 
2876 static const struct rhashtable_params netlink_rhashtable_params = {
2877 	.head_offset = offsetof(struct netlink_sock, node),
2878 	.key_len = netlink_compare_arg_len,
2879 	.obj_hashfn = netlink_hash,
2880 	.obj_cmpfn = netlink_compare,
2881 	.automatic_shrinking = true,
2882 };
2883 
2884 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
2885 BTF_ID_LIST(btf_netlink_sock_id)
2886 BTF_ID(struct, netlink_sock)
2887 
2888 static const struct bpf_iter_seq_info netlink_seq_info = {
2889 	.seq_ops		= &netlink_seq_ops,
2890 	.init_seq_private	= bpf_iter_init_seq_net,
2891 	.fini_seq_private	= bpf_iter_fini_seq_net,
2892 	.seq_priv_size		= sizeof(struct nl_seq_iter),
2893 };
2894 
2895 static struct bpf_iter_reg netlink_reg_info = {
2896 	.target			= "netlink",
2897 	.ctx_arg_info_size	= 1,
2898 	.ctx_arg_info		= {
2899 		{ offsetof(struct bpf_iter__netlink, sk),
2900 		  PTR_TO_BTF_ID_OR_NULL },
2901 	},
2902 	.seq_info		= &netlink_seq_info,
2903 };
2904 
2905 static int __init bpf_iter_register(void)
2906 {
2907 	netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id;
2908 	return bpf_iter_reg_target(&netlink_reg_info);
2909 }
2910 #endif
2911 
2912 static int __init netlink_proto_init(void)
2913 {
2914 	int i;
2915 	int err = proto_register(&netlink_proto, 0);
2916 
2917 	if (err != 0)
2918 		goto out;
2919 
2920 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
2921 	err = bpf_iter_register();
2922 	if (err)
2923 		goto out;
2924 #endif
2925 
2926 	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb));
2927 
2928 	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
2929 	if (!nl_table)
2930 		goto panic;
2931 
2932 	for (i = 0; i < MAX_LINKS; i++) {
2933 		if (rhashtable_init(&nl_table[i].hash,
2934 				    &netlink_rhashtable_params) < 0) {
2935 			while (--i >= 0)
2936 				rhashtable_destroy(&nl_table[i].hash);
2937 			kfree(nl_table);
2938 			goto panic;
2939 		}
2940 	}
2941 
2942 	netlink_add_usersock_entry();
2943 
2944 	sock_register(&netlink_family_ops);
2945 	register_pernet_subsys(&netlink_net_ops);
2946 	register_pernet_subsys(&netlink_tap_net_ops);
2947 	/* The netlink device handler may be needed early. */
2948 	rtnetlink_init();
2949 out:
2950 	return err;
2951 panic:
2952 	panic("netlink_init: Cannot allocate nl_table\n");
2953 }
2954 
2955 core_initcall(netlink_proto_init);
2956