xref: /linux/net/netlink/af_netlink.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
1 /*
2  * NETLINK      Kernel-user communication protocol.
3  *
4  * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6  *
7  *		This program is free software; you can redistribute it and/or
8  *		modify it under the terms of the GNU General Public License
9  *		as published by the Free Software Foundation; either version
10  *		2 of the License, or (at your option) any later version.
11  *
12  * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13  *                               added netlink_proto_exit
14  * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
15  * 				 use nlk_sk, as sk->protinfo is on a diet 8)
16  * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
17  * 				 - inc module use count of module that owns
18  * 				   the kernel socket in case userspace opens
19  * 				   socket of same protocol
20  * 				 - remove all module support, since netlink is
21  * 				   mandatory if CONFIG_NET=y these days
22  */
23 
24 #include <linux/module.h>
25 
26 #include <linux/capability.h>
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/signal.h>
30 #include <linux/sched.h>
31 #include <linux/errno.h>
32 #include <linux/string.h>
33 #include <linux/stat.h>
34 #include <linux/socket.h>
35 #include <linux/un.h>
36 #include <linux/fcntl.h>
37 #include <linux/termios.h>
38 #include <linux/sockios.h>
39 #include <linux/net.h>
40 #include <linux/fs.h>
41 #include <linux/slab.h>
42 #include <asm/uaccess.h>
43 #include <linux/skbuff.h>
44 #include <linux/netdevice.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/proc_fs.h>
47 #include <linux/seq_file.h>
48 #include <linux/notifier.h>
49 #include <linux/security.h>
50 #include <linux/jhash.h>
51 #include <linux/jiffies.h>
52 #include <linux/random.h>
53 #include <linux/bitops.h>
54 #include <linux/mm.h>
55 #include <linux/types.h>
56 #include <linux/audit.h>
57 #include <linux/mutex.h>
58 
59 #include <net/net_namespace.h>
60 #include <net/sock.h>
61 #include <net/scm.h>
62 #include <net/netlink.h>
63 
64 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
65 #define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
66 
67 struct netlink_sock {
68 	/* struct sock has to be the first member of netlink_sock */
69 	struct sock		sk;
70 	u32			pid;
71 	u32			dst_pid;
72 	u32			dst_group;
73 	u32			flags;
74 	u32			subscriptions;
75 	u32			ngroups;
76 	unsigned long		*groups;
77 	unsigned long		state;
78 	wait_queue_head_t	wait;
79 	struct netlink_callback	*cb;
80 	struct mutex		*cb_mutex;
81 	struct mutex		cb_def_mutex;
82 	void			(*netlink_rcv)(struct sk_buff *skb);
83 	struct module		*module;
84 };
85 
86 struct listeners {
87 	struct rcu_head		rcu;
88 	unsigned long		masks[0];
89 };
90 
91 #define NETLINK_KERNEL_SOCKET	0x1
92 #define NETLINK_RECV_PKTINFO	0x2
93 #define NETLINK_BROADCAST_SEND_ERROR	0x4
94 #define NETLINK_RECV_NO_ENOBUFS	0x8
95 
96 static inline struct netlink_sock *nlk_sk(struct sock *sk)
97 {
98 	return container_of(sk, struct netlink_sock, sk);
99 }
100 
101 static inline int netlink_is_kernel(struct sock *sk)
102 {
103 	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
104 }
105 
106 struct nl_pid_hash {
107 	struct hlist_head	*table;
108 	unsigned long		rehash_time;
109 
110 	unsigned int		mask;
111 	unsigned int		shift;
112 
113 	unsigned int		entries;
114 	unsigned int		max_shift;
115 
116 	u32			rnd;
117 };
118 
119 struct netlink_table {
120 	struct nl_pid_hash	hash;
121 	struct hlist_head	mc_list;
122 	struct listeners __rcu	*listeners;
123 	unsigned int		nl_nonroot;
124 	unsigned int		groups;
125 	struct mutex		*cb_mutex;
126 	struct module		*module;
127 	int			registered;
128 };
129 
130 static struct netlink_table *nl_table;
131 
132 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
133 
134 static int netlink_dump(struct sock *sk);
135 
136 static DEFINE_RWLOCK(nl_table_lock);
137 static atomic_t nl_table_users = ATOMIC_INIT(0);
138 
139 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
140 
141 static inline u32 netlink_group_mask(u32 group)
142 {
143 	return group ? 1 << (group - 1) : 0;
144 }
145 
146 static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
147 {
148 	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
149 }
150 
151 static void netlink_destroy_callback(struct netlink_callback *cb)
152 {
153 	kfree_skb(cb->skb);
154 	kfree(cb);
155 }
156 
157 static void netlink_consume_callback(struct netlink_callback *cb)
158 {
159 	consume_skb(cb->skb);
160 	kfree(cb);
161 }
162 
163 static void netlink_sock_destruct(struct sock *sk)
164 {
165 	struct netlink_sock *nlk = nlk_sk(sk);
166 
167 	if (nlk->cb) {
168 		if (nlk->cb->done)
169 			nlk->cb->done(nlk->cb);
170 		netlink_destroy_callback(nlk->cb);
171 	}
172 
173 	skb_queue_purge(&sk->sk_receive_queue);
174 
175 	if (!sock_flag(sk, SOCK_DEAD)) {
176 		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
177 		return;
178 	}
179 
180 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
181 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
182 	WARN_ON(nlk_sk(sk)->groups);
183 }
184 
185 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
186  * SMP. Look, when several writers sleep and a reader wakes them up, all but
187  * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
188  * solves this, _but_ remember, it adds useless work on UP machines.
189  */
190 
191 void netlink_table_grab(void)
192 	__acquires(nl_table_lock)
193 {
194 	might_sleep();
195 
196 	write_lock_irq(&nl_table_lock);
197 
198 	if (atomic_read(&nl_table_users)) {
199 		DECLARE_WAITQUEUE(wait, current);
200 
201 		add_wait_queue_exclusive(&nl_table_wait, &wait);
202 		for (;;) {
203 			set_current_state(TASK_UNINTERRUPTIBLE);
204 			if (atomic_read(&nl_table_users) == 0)
205 				break;
206 			write_unlock_irq(&nl_table_lock);
207 			schedule();
208 			write_lock_irq(&nl_table_lock);
209 		}
210 
211 		__set_current_state(TASK_RUNNING);
212 		remove_wait_queue(&nl_table_wait, &wait);
213 	}
214 }
215 
216 void netlink_table_ungrab(void)
217 	__releases(nl_table_lock)
218 {
219 	write_unlock_irq(&nl_table_lock);
220 	wake_up(&nl_table_wait);
221 }
222 
223 static inline void
224 netlink_lock_table(void)
225 {
226 	/* read_lock() synchronizes us with netlink_table_grab() */
227 
228 	read_lock(&nl_table_lock);
229 	atomic_inc(&nl_table_users);
230 	read_unlock(&nl_table_lock);
231 }
232 
233 static inline void
234 netlink_unlock_table(void)
235 {
236 	if (atomic_dec_and_test(&nl_table_users))
237 		wake_up(&nl_table_wait);
238 }
239 
240 static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
241 {
242 	struct nl_pid_hash *hash = &nl_table[protocol].hash;
243 	struct hlist_head *head;
244 	struct sock *sk;
245 	struct hlist_node *node;
246 
247 	read_lock(&nl_table_lock);
248 	head = nl_pid_hashfn(hash, pid);
249 	sk_for_each(sk, node, head) {
250 		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
251 			sock_hold(sk);
252 			goto found;
253 		}
254 	}
255 	sk = NULL;
256 found:
257 	read_unlock(&nl_table_lock);
258 	return sk;
259 }
260 
261 static struct hlist_head *nl_pid_hash_zalloc(size_t size)
262 {
263 	if (size <= PAGE_SIZE)
264 		return kzalloc(size, GFP_ATOMIC);
265 	else
266 		return (struct hlist_head *)
267 			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
268 					 get_order(size));
269 }
270 
271 static void nl_pid_hash_free(struct hlist_head *table, size_t size)
272 {
273 	if (size <= PAGE_SIZE)
274 		kfree(table);
275 	else
276 		free_pages((unsigned long)table, get_order(size));
277 }
278 
279 static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
280 {
281 	unsigned int omask, mask, shift;
282 	size_t osize, size;
283 	struct hlist_head *otable, *table;
284 	int i;
285 
286 	omask = mask = hash->mask;
287 	osize = size = (mask + 1) * sizeof(*table);
288 	shift = hash->shift;
289 
290 	if (grow) {
291 		if (++shift > hash->max_shift)
292 			return 0;
293 		mask = mask * 2 + 1;
294 		size *= 2;
295 	}
296 
297 	table = nl_pid_hash_zalloc(size);
298 	if (!table)
299 		return 0;
300 
301 	otable = hash->table;
302 	hash->table = table;
303 	hash->mask = mask;
304 	hash->shift = shift;
305 	get_random_bytes(&hash->rnd, sizeof(hash->rnd));
306 
307 	for (i = 0; i <= omask; i++) {
308 		struct sock *sk;
309 		struct hlist_node *node, *tmp;
310 
311 		sk_for_each_safe(sk, node, tmp, &otable[i])
312 			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
313 	}
314 
315 	nl_pid_hash_free(otable, osize);
316 	hash->rehash_time = jiffies + 10 * 60 * HZ;
317 	return 1;
318 }
319 
320 static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
321 {
322 	int avg = hash->entries >> hash->shift;
323 
324 	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
325 		return 1;
326 
327 	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
328 		nl_pid_hash_rehash(hash, 0);
329 		return 1;
330 	}
331 
332 	return 0;
333 }
334 
335 static const struct proto_ops netlink_ops;
336 
337 static void
338 netlink_update_listeners(struct sock *sk)
339 {
340 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
341 	struct hlist_node *node;
342 	unsigned long mask;
343 	unsigned int i;
344 
345 	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
346 		mask = 0;
347 		sk_for_each_bound(sk, node, &tbl->mc_list) {
348 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
349 				mask |= nlk_sk(sk)->groups[i];
350 		}
351 		tbl->listeners->masks[i] = mask;
352 	}
353 	/* this function is only called with the netlink table "grabbed", which
354 	 * makes sure updates are visible before bind or setsockopt return. */
355 }
356 
357 static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
358 {
359 	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
360 	struct hlist_head *head;
361 	int err = -EADDRINUSE;
362 	struct sock *osk;
363 	struct hlist_node *node;
364 	int len;
365 
366 	netlink_table_grab();
367 	head = nl_pid_hashfn(hash, pid);
368 	len = 0;
369 	sk_for_each(osk, node, head) {
370 		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
371 			break;
372 		len++;
373 	}
374 	if (node)
375 		goto err;
376 
377 	err = -EBUSY;
378 	if (nlk_sk(sk)->pid)
379 		goto err;
380 
381 	err = -ENOMEM;
382 	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
383 		goto err;
384 
385 	if (len && nl_pid_hash_dilute(hash, len))
386 		head = nl_pid_hashfn(hash, pid);
387 	hash->entries++;
388 	nlk_sk(sk)->pid = pid;
389 	sk_add_node(sk, head);
390 	err = 0;
391 
392 err:
393 	netlink_table_ungrab();
394 	return err;
395 }
396 
397 static void netlink_remove(struct sock *sk)
398 {
399 	netlink_table_grab();
400 	if (sk_del_node_init(sk))
401 		nl_table[sk->sk_protocol].hash.entries--;
402 	if (nlk_sk(sk)->subscriptions)
403 		__sk_del_bind_node(sk);
404 	netlink_table_ungrab();
405 }
406 
407 static struct proto netlink_proto = {
408 	.name	  = "NETLINK",
409 	.owner	  = THIS_MODULE,
410 	.obj_size = sizeof(struct netlink_sock),
411 };
412 
413 static int __netlink_create(struct net *net, struct socket *sock,
414 			    struct mutex *cb_mutex, int protocol)
415 {
416 	struct sock *sk;
417 	struct netlink_sock *nlk;
418 
419 	sock->ops = &netlink_ops;
420 
421 	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
422 	if (!sk)
423 		return -ENOMEM;
424 
425 	sock_init_data(sock, sk);
426 
427 	nlk = nlk_sk(sk);
428 	if (cb_mutex) {
429 		nlk->cb_mutex = cb_mutex;
430 	} else {
431 		nlk->cb_mutex = &nlk->cb_def_mutex;
432 		mutex_init(nlk->cb_mutex);
433 	}
434 	init_waitqueue_head(&nlk->wait);
435 
436 	sk->sk_destruct = netlink_sock_destruct;
437 	sk->sk_protocol = protocol;
438 	return 0;
439 }
440 
441 static int netlink_create(struct net *net, struct socket *sock, int protocol,
442 			  int kern)
443 {
444 	struct module *module = NULL;
445 	struct mutex *cb_mutex;
446 	struct netlink_sock *nlk;
447 	int err = 0;
448 
449 	sock->state = SS_UNCONNECTED;
450 
451 	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
452 		return -ESOCKTNOSUPPORT;
453 
454 	if (protocol < 0 || protocol >= MAX_LINKS)
455 		return -EPROTONOSUPPORT;
456 
457 	netlink_lock_table();
458 #ifdef CONFIG_MODULES
459 	if (!nl_table[protocol].registered) {
460 		netlink_unlock_table();
461 		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
462 		netlink_lock_table();
463 	}
464 #endif
465 	if (nl_table[protocol].registered &&
466 	    try_module_get(nl_table[protocol].module))
467 		module = nl_table[protocol].module;
468 	else
469 		err = -EPROTONOSUPPORT;
470 	cb_mutex = nl_table[protocol].cb_mutex;
471 	netlink_unlock_table();
472 
473 	if (err < 0)
474 		goto out;
475 
476 	err = __netlink_create(net, sock, cb_mutex, protocol);
477 	if (err < 0)
478 		goto out_module;
479 
480 	local_bh_disable();
481 	sock_prot_inuse_add(net, &netlink_proto, 1);
482 	local_bh_enable();
483 
484 	nlk = nlk_sk(sock->sk);
485 	nlk->module = module;
486 out:
487 	return err;
488 
489 out_module:
490 	module_put(module);
491 	goto out;
492 }
493 
494 static int netlink_release(struct socket *sock)
495 {
496 	struct sock *sk = sock->sk;
497 	struct netlink_sock *nlk;
498 
499 	if (!sk)
500 		return 0;
501 
502 	netlink_remove(sk);
503 	sock_orphan(sk);
504 	nlk = nlk_sk(sk);
505 
506 	/*
507 	 * OK. Socket is unlinked, any packets that arrive now
508 	 * will be purged.
509 	 */
510 
511 	sock->sk = NULL;
512 	wake_up_interruptible_all(&nlk->wait);
513 
514 	skb_queue_purge(&sk->sk_write_queue);
515 
516 	if (nlk->pid) {
517 		struct netlink_notify n = {
518 						.net = sock_net(sk),
519 						.protocol = sk->sk_protocol,
520 						.pid = nlk->pid,
521 					  };
522 		atomic_notifier_call_chain(&netlink_chain,
523 				NETLINK_URELEASE, &n);
524 	}
525 
526 	module_put(nlk->module);
527 
528 	netlink_table_grab();
529 	if (netlink_is_kernel(sk)) {
530 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
531 		if (--nl_table[sk->sk_protocol].registered == 0) {
532 			kfree(nl_table[sk->sk_protocol].listeners);
533 			nl_table[sk->sk_protocol].module = NULL;
534 			nl_table[sk->sk_protocol].registered = 0;
535 		}
536 	} else if (nlk->subscriptions) {
537 		netlink_update_listeners(sk);
538 	}
539 	netlink_table_ungrab();
540 
541 	kfree(nlk->groups);
542 	nlk->groups = NULL;
543 
544 	local_bh_disable();
545 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
546 	local_bh_enable();
547 	sock_put(sk);
548 	return 0;
549 }
550 
551 static int netlink_autobind(struct socket *sock)
552 {
553 	struct sock *sk = sock->sk;
554 	struct net *net = sock_net(sk);
555 	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
556 	struct hlist_head *head;
557 	struct sock *osk;
558 	struct hlist_node *node;
559 	s32 pid = task_tgid_vnr(current);
560 	int err;
561 	static s32 rover = -4097;
562 
563 retry:
564 	cond_resched();
565 	netlink_table_grab();
566 	head = nl_pid_hashfn(hash, pid);
567 	sk_for_each(osk, node, head) {
568 		if (!net_eq(sock_net(osk), net))
569 			continue;
570 		if (nlk_sk(osk)->pid == pid) {
571 			/* Bind collision, search negative pid values. */
572 			pid = rover--;
573 			if (rover > -4097)
574 				rover = -4097;
575 			netlink_table_ungrab();
576 			goto retry;
577 		}
578 	}
579 	netlink_table_ungrab();
580 
581 	err = netlink_insert(sk, net, pid);
582 	if (err == -EADDRINUSE)
583 		goto retry;
584 
585 	/* If 2 threads race to autobind, that is fine.  */
586 	if (err == -EBUSY)
587 		err = 0;
588 
589 	return err;
590 }
591 
592 static inline int netlink_capable(const struct socket *sock, unsigned int flag)
593 {
594 	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
595 	       capable(CAP_NET_ADMIN);
596 }
597 
598 static void
599 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
600 {
601 	struct netlink_sock *nlk = nlk_sk(sk);
602 
603 	if (nlk->subscriptions && !subscriptions)
604 		__sk_del_bind_node(sk);
605 	else if (!nlk->subscriptions && subscriptions)
606 		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
607 	nlk->subscriptions = subscriptions;
608 }
609 
610 static int netlink_realloc_groups(struct sock *sk)
611 {
612 	struct netlink_sock *nlk = nlk_sk(sk);
613 	unsigned int groups;
614 	unsigned long *new_groups;
615 	int err = 0;
616 
617 	netlink_table_grab();
618 
619 	groups = nl_table[sk->sk_protocol].groups;
620 	if (!nl_table[sk->sk_protocol].registered) {
621 		err = -ENOENT;
622 		goto out_unlock;
623 	}
624 
625 	if (nlk->ngroups >= groups)
626 		goto out_unlock;
627 
628 	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
629 	if (new_groups == NULL) {
630 		err = -ENOMEM;
631 		goto out_unlock;
632 	}
633 	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
634 	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
635 
636 	nlk->groups = new_groups;
637 	nlk->ngroups = groups;
638  out_unlock:
639 	netlink_table_ungrab();
640 	return err;
641 }
642 
643 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
644 			int addr_len)
645 {
646 	struct sock *sk = sock->sk;
647 	struct net *net = sock_net(sk);
648 	struct netlink_sock *nlk = nlk_sk(sk);
649 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
650 	int err;
651 
652 	if (nladdr->nl_family != AF_NETLINK)
653 		return -EINVAL;
654 
655 	/* Only the superuser is allowed to listen to multicasts */
656 	if (nladdr->nl_groups) {
657 		if (!netlink_capable(sock, NL_NONROOT_RECV))
658 			return -EPERM;
659 		err = netlink_realloc_groups(sk);
660 		if (err)
661 			return err;
662 	}
663 
664 	if (nlk->pid) {
665 		if (nladdr->nl_pid != nlk->pid)
666 			return -EINVAL;
667 	} else {
668 		err = nladdr->nl_pid ?
669 			netlink_insert(sk, net, nladdr->nl_pid) :
670 			netlink_autobind(sock);
671 		if (err)
672 			return err;
673 	}
674 
675 	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
676 		return 0;
677 
678 	netlink_table_grab();
679 	netlink_update_subscriptions(sk, nlk->subscriptions +
680 					 hweight32(nladdr->nl_groups) -
681 					 hweight32(nlk->groups[0]));
682 	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
683 	netlink_update_listeners(sk);
684 	netlink_table_ungrab();
685 
686 	return 0;
687 }
688 
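/*
 * Userspace counterpart of the bind() path above, sketched for
 * illustration (NETLINK_ROUTE/RTMGRP_LINK are just one well-known
 * protocol/group pair from <linux/netlink.h> and <linux/rtnetlink.h>;
 * error handling is omitted).  An nl_pid of 0 takes the netlink_autobind()
 * path instead of netlink_insert(), and a non-zero nl_groups mask is
 * subject to the NL_NONROOT_RECV/CAP_NET_ADMIN check above:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,
 *		.nl_groups = RTMGRP_LINK,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */
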
689 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
690 			   int alen, int flags)
691 {
692 	int err = 0;
693 	struct sock *sk = sock->sk;
694 	struct netlink_sock *nlk = nlk_sk(sk);
695 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
696 
697 	if (alen < sizeof(addr->sa_family))
698 		return -EINVAL;
699 
700 	if (addr->sa_family == AF_UNSPEC) {
701 		sk->sk_state	= NETLINK_UNCONNECTED;
702 		nlk->dst_pid	= 0;
703 		nlk->dst_group  = 0;
704 		return 0;
705 	}
706 	if (addr->sa_family != AF_NETLINK)
707 		return -EINVAL;
708 
709 	/* Only the superuser is allowed to send multicasts */
710 	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
711 		return -EPERM;
712 
713 	if (!nlk->pid)
714 		err = netlink_autobind(sock);
715 
716 	if (err == 0) {
717 		sk->sk_state	= NETLINK_CONNECTED;
718 		nlk->dst_pid 	= nladdr->nl_pid;
719 		nlk->dst_group  = ffs(nladdr->nl_groups);
720 	}
721 
722 	return err;
723 }
724 
725 static int netlink_getname(struct socket *sock, struct sockaddr *addr,
726 			   int *addr_len, int peer)
727 {
728 	struct sock *sk = sock->sk;
729 	struct netlink_sock *nlk = nlk_sk(sk);
730 	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
731 
732 	nladdr->nl_family = AF_NETLINK;
733 	nladdr->nl_pad = 0;
734 	*addr_len = sizeof(*nladdr);
735 
736 	if (peer) {
737 		nladdr->nl_pid = nlk->dst_pid;
738 		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
739 	} else {
740 		nladdr->nl_pid = nlk->pid;
741 		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
742 	}
743 	return 0;
744 }
745 
746 static void netlink_overrun(struct sock *sk)
747 {
748 	struct netlink_sock *nlk = nlk_sk(sk);
749 
750 	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
751 		if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
752 			sk->sk_err = ENOBUFS;
753 			sk->sk_error_report(sk);
754 		}
755 	}
756 	atomic_inc(&sk->sk_drops);
757 }
758 
759 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
760 {
761 	struct sock *sock;
762 	struct netlink_sock *nlk;
763 
764 	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
765 	if (!sock)
766 		return ERR_PTR(-ECONNREFUSED);
767 
768 	/* Don't bother queuing skb if kernel socket has no input function */
769 	nlk = nlk_sk(sock);
770 	if (sock->sk_state == NETLINK_CONNECTED &&
771 	    nlk->dst_pid != nlk_sk(ssk)->pid) {
772 		sock_put(sock);
773 		return ERR_PTR(-ECONNREFUSED);
774 	}
775 	return sock;
776 }
777 
778 struct sock *netlink_getsockbyfilp(struct file *filp)
779 {
780 	struct inode *inode = filp->f_path.dentry->d_inode;
781 	struct sock *sock;
782 
783 	if (!S_ISSOCK(inode->i_mode))
784 		return ERR_PTR(-ENOTSOCK);
785 
786 	sock = SOCKET_I(inode)->sk;
787 	if (sock->sk_family != AF_NETLINK)
788 		return ERR_PTR(-EINVAL);
789 
790 	sock_hold(sock);
791 	return sock;
792 }
793 
794 /*
795  * Attach a skb to a netlink socket.
796  * The caller must hold a reference to the destination socket. On error, the
797  * reference is dropped. The skb is not sent to the destination; all the
798  * error checks are performed and memory in the queue is reserved.
799  * Return values:
800  * < 0: error. skb freed, reference to sock dropped.
801  * 0: continue
802  * 1: repeat lookup - reference dropped while waiting for socket memory.
803  */
804 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
805 		      long *timeo, struct sock *ssk)
806 {
807 	struct netlink_sock *nlk;
808 
809 	nlk = nlk_sk(sk);
810 
811 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
812 	    test_bit(0, &nlk->state)) {
813 		DECLARE_WAITQUEUE(wait, current);
814 		if (!*timeo) {
815 			if (!ssk || netlink_is_kernel(ssk))
816 				netlink_overrun(sk);
817 			sock_put(sk);
818 			kfree_skb(skb);
819 			return -EAGAIN;
820 		}
821 
822 		__set_current_state(TASK_INTERRUPTIBLE);
823 		add_wait_queue(&nlk->wait, &wait);
824 
825 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
826 		     test_bit(0, &nlk->state)) &&
827 		    !sock_flag(sk, SOCK_DEAD))
828 			*timeo = schedule_timeout(*timeo);
829 
830 		__set_current_state(TASK_RUNNING);
831 		remove_wait_queue(&nlk->wait, &wait);
832 		sock_put(sk);
833 
834 		if (signal_pending(current)) {
835 			kfree_skb(skb);
836 			return sock_intr_errno(*timeo);
837 		}
838 		return 1;
839 	}
840 	skb_set_owner_r(skb, sk);
841 	return 0;
842 }
843 
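/*
 * The < 0 / 0 / 1 return convention above is meant to be consumed by a
 * retry loop in the caller; netlink_unicast() below follows exactly this
 * shape (sketch only, kernel-socket and sk_filter handling omitted).  A
 * return of 1 means the socket reference was already dropped and the
 * lookup has to be redone; a negative return means the skb is already
 * freed:
 *
 *	retry:
 *		sk = netlink_getsockbypid(ssk, pid);
 *		if (IS_ERR(sk)) {
 *			kfree_skb(skb);
 *			return PTR_ERR(sk);
 *		}
 *		err = netlink_attachskb(sk, skb, &timeo, ssk);
 *		if (err == 1)
 *			goto retry;
 *		if (err)
 *			return err;
 *		return netlink_sendskb(sk, skb);
 */
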
844 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
845 {
846 	int len = skb->len;
847 
848 	skb_queue_tail(&sk->sk_receive_queue, skb);
849 	sk->sk_data_ready(sk, len);
850 	return len;
851 }
852 
853 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
854 {
855 	int len = __netlink_sendskb(sk, skb);
856 
857 	sock_put(sk);
858 	return len;
859 }
860 
861 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
862 {
863 	kfree_skb(skb);
864 	sock_put(sk);
865 }
866 
867 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
868 {
869 	int delta;
870 
871 	skb_orphan(skb);
872 
873 	delta = skb->end - skb->tail;
874 	if (delta * 2 < skb->truesize)
875 		return skb;
876 
877 	if (skb_shared(skb)) {
878 		struct sk_buff *nskb = skb_clone(skb, allocation);
879 		if (!nskb)
880 			return skb;
881 		consume_skb(skb);
882 		skb = nskb;
883 	}
884 
885 	if (!pskb_expand_head(skb, 0, -delta, allocation))
886 		skb->truesize -= delta;
887 
888 	return skb;
889 }
890 
891 static void netlink_rcv_wake(struct sock *sk)
892 {
893 	struct netlink_sock *nlk = nlk_sk(sk);
894 
895 	if (skb_queue_empty(&sk->sk_receive_queue))
896 		clear_bit(0, &nlk->state);
897 	if (!test_bit(0, &nlk->state))
898 		wake_up_interruptible(&nlk->wait);
899 }
900 
901 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
902 {
903 	int ret;
904 	struct netlink_sock *nlk = nlk_sk(sk);
905 
906 	ret = -ECONNREFUSED;
907 	if (nlk->netlink_rcv != NULL) {
908 		ret = skb->len;
909 		skb_set_owner_r(skb, sk);
910 		nlk->netlink_rcv(skb);
911 		consume_skb(skb);
912 	} else {
913 		kfree_skb(skb);
914 	}
915 	sock_put(sk);
916 	return ret;
917 }
918 
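/**
 * netlink_unicast - deliver a message to a single socket
 * @ssk: the sending socket, typically the kernel socket of the protocol
 * @skb: the message; ownership is always taken, it is freed on failure
 * @pid: netlink pid of the destination socket
 * @nonblock: if non-zero, return -EAGAIN instead of waiting for
 *	receive-queue space on a congested destination
 *
 * Returns the message length on success or a negative error code,
 * e.g. -ECONNREFUSED if no socket with @pid exists.
 */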
919 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
920 		    u32 pid, int nonblock)
921 {
922 	struct sock *sk;
923 	int err;
924 	long timeo;
925 
926 	skb = netlink_trim(skb, gfp_any());
927 
928 	timeo = sock_sndtimeo(ssk, nonblock);
929 retry:
930 	sk = netlink_getsockbypid(ssk, pid);
931 	if (IS_ERR(sk)) {
932 		kfree_skb(skb);
933 		return PTR_ERR(sk);
934 	}
935 	if (netlink_is_kernel(sk))
936 		return netlink_unicast_kernel(sk, skb);
937 
938 	if (sk_filter(sk, skb)) {
939 		err = skb->len;
940 		kfree_skb(skb);
941 		sock_put(sk);
942 		return err;
943 	}
944 
945 	err = netlink_attachskb(sk, skb, &timeo, ssk);
946 	if (err == 1)
947 		goto retry;
948 	if (err)
949 		return err;
950 
951 	return netlink_sendskb(sk, skb);
952 }
953 EXPORT_SYMBOL(netlink_unicast);
954 
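/**
 * netlink_has_listeners - check whether a multicast group has subscribers
 * @sk: the kernel socket of the protocol (user sockets hit the BUG_ON)
 * @group: the multicast group to test
 *
 * Returns non-zero if at least one socket is currently bound to @group.
 * The listeners bitmap is read under RCU, so the answer may already be
 * stale by the time the caller acts on it; it is only useful as an
 * optimisation to avoid building messages nobody would receive.
 */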
955 int netlink_has_listeners(struct sock *sk, unsigned int group)
956 {
957 	int res = 0;
958 	struct listeners *listeners;
959 
960 	BUG_ON(!netlink_is_kernel(sk));
961 
962 	rcu_read_lock();
963 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
964 
965 	if (group - 1 < nl_table[sk->sk_protocol].groups)
966 		res = test_bit(group - 1, listeners->masks);
967 
968 	rcu_read_unlock();
969 
970 	return res;
971 }
972 EXPORT_SYMBOL_GPL(netlink_has_listeners);
973 
974 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
975 {
976 	struct netlink_sock *nlk = nlk_sk(sk);
977 
978 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
979 	    !test_bit(0, &nlk->state)) {
980 		skb_set_owner_r(skb, sk);
981 		__netlink_sendskb(sk, skb);
982 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
983 	}
984 	return -1;
985 }
986 
987 struct netlink_broadcast_data {
988 	struct sock *exclude_sk;
989 	struct net *net;
990 	u32 pid;
991 	u32 group;
992 	int failure;
993 	int delivery_failure;
994 	int congested;
995 	int delivered;
996 	gfp_t allocation;
997 	struct sk_buff *skb, *skb2;
998 	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
999 	void *tx_data;
1000 };
1001 
1002 static int do_one_broadcast(struct sock *sk,
1003 				   struct netlink_broadcast_data *p)
1004 {
1005 	struct netlink_sock *nlk = nlk_sk(sk);
1006 	int val;
1007 
1008 	if (p->exclude_sk == sk)
1009 		goto out;
1010 
1011 	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
1012 	    !test_bit(p->group - 1, nlk->groups))
1013 		goto out;
1014 
1015 	if (!net_eq(sock_net(sk), p->net))
1016 		goto out;
1017 
1018 	if (p->failure) {
1019 		netlink_overrun(sk);
1020 		goto out;
1021 	}
1022 
1023 	sock_hold(sk);
1024 	if (p->skb2 == NULL) {
1025 		if (skb_shared(p->skb)) {
1026 			p->skb2 = skb_clone(p->skb, p->allocation);
1027 		} else {
1028 			p->skb2 = skb_get(p->skb);
1029 			/*
1030 			 * skb ownership may have been set when
1031 			 * delivered to a previous socket.
1032 			 */
1033 			skb_orphan(p->skb2);
1034 		}
1035 	}
1036 	if (p->skb2 == NULL) {
1037 		netlink_overrun(sk);
1038 		/* Clone failed. Notify ALL listeners. */
1039 		p->failure = 1;
1040 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1041 			p->delivery_failure = 1;
1042 	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1043 		kfree_skb(p->skb2);
1044 		p->skb2 = NULL;
1045 	} else if (sk_filter(sk, p->skb2)) {
1046 		kfree_skb(p->skb2);
1047 		p->skb2 = NULL;
1048 	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1049 		netlink_overrun(sk);
1050 		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1051 			p->delivery_failure = 1;
1052 	} else {
1053 		p->congested |= val;
1054 		p->delivered = 1;
1055 		p->skb2 = NULL;
1056 	}
1057 	sock_put(sk);
1058 
1059 out:
1060 	return 0;
1061 }
1062 
1063 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
1064 	u32 group, gfp_t allocation,
1065 	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1066 	void *filter_data)
1067 {
1068 	struct net *net = sock_net(ssk);
1069 	struct netlink_broadcast_data info;
1070 	struct hlist_node *node;
1071 	struct sock *sk;
1072 
1073 	skb = netlink_trim(skb, allocation);
1074 
1075 	info.exclude_sk = ssk;
1076 	info.net = net;
1077 	info.pid = pid;
1078 	info.group = group;
1079 	info.failure = 0;
1080 	info.delivery_failure = 0;
1081 	info.congested = 0;
1082 	info.delivered = 0;
1083 	info.allocation = allocation;
1084 	info.skb = skb;
1085 	info.skb2 = NULL;
1086 	info.tx_filter = filter;
1087 	info.tx_data = filter_data;
1088 
1089 	/* While we sleep in clone, do not allow the socket list to change */
1090 
1091 	netlink_lock_table();
1092 
1093 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1094 		do_one_broadcast(sk, &info);
1095 
1096 	consume_skb(skb);
1097 
1098 	netlink_unlock_table();
1099 
1100 	if (info.delivery_failure) {
1101 		kfree_skb(info.skb2);
1102 		return -ENOBUFS;
1103 	}
1104 	consume_skb(info.skb2);
1105 
1106 	if (info.delivered) {
1107 		if (info.congested && (allocation & __GFP_WAIT))
1108 			yield();
1109 		return 0;
1110 	}
1111 	return -ESRCH;
1112 }
1113 EXPORT_SYMBOL(netlink_broadcast_filtered);
1114 
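/**
 * netlink_broadcast - deliver a message to every listener of a group
 * @ssk: the sending socket; its namespace and protocol select the listeners
 * @skb: the message, consumed regardless of the outcome
 * @pid: netlink pid to skip (e.g. the original requester), or 0
 * @group: destination multicast group
 * @allocation: GFP mask used when the skb has to be cloned per receiver
 *
 * Returns 0 if the message reached at least one listener, -ESRCH if there
 * were none, and -ENOBUFS if delivery failed and a listener had asked for
 * NETLINK_BROADCAST_ERROR reporting.
 */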
1115 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
1116 		      u32 group, gfp_t allocation)
1117 {
1118 	return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
1119 		NULL, NULL);
1120 }
1121 EXPORT_SYMBOL(netlink_broadcast);
1122 
1123 struct netlink_set_err_data {
1124 	struct sock *exclude_sk;
1125 	u32 pid;
1126 	u32 group;
1127 	int code;
1128 };
1129 
1130 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1131 {
1132 	struct netlink_sock *nlk = nlk_sk(sk);
1133 	int ret = 0;
1134 
1135 	if (sk == p->exclude_sk)
1136 		goto out;
1137 
1138 	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1139 		goto out;
1140 
1141 	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
1142 	    !test_bit(p->group - 1, nlk->groups))
1143 		goto out;
1144 
1145 	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
1146 		ret = 1;
1147 		goto out;
1148 	}
1149 
1150 	sk->sk_err = p->code;
1151 	sk->sk_error_report(sk);
1152 out:
1153 	return ret;
1154 }
1155 
1156 /**
1157  * netlink_set_err - report error to broadcast listeners
1158  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1159  * @pid: the PID of a process that we want to skip (if any)
1160  * @group: the broadcast group that will notice the error
1161  * @code: error code, must be negative (as usual in kernelspace)
1162  *
1163  * This function returns the number of broadcast listeners that have set the
1164  * NETLINK_RECV_NO_ENOBUFS socket option.
1165  */
1166 int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1167 {
1168 	struct netlink_set_err_data info;
1169 	struct hlist_node *node;
1170 	struct sock *sk;
1171 	int ret = 0;
1172 
1173 	info.exclude_sk = ssk;
1174 	info.pid = pid;
1175 	info.group = group;
1176 	/* sk->sk_err wants a positive error value */
1177 	info.code = -code;
1178 
1179 	read_lock(&nl_table_lock);
1180 
1181 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1182 		ret += do_one_set_err(sk, &info);
1183 
1184 	read_unlock(&nl_table_lock);
1185 	return ret;
1186 }
1187 EXPORT_SYMBOL(netlink_set_err);
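
/*
 * Typical use: when a notification cannot even be built, tell everybody
 * listening on the group that they missed something.  A sketch, with
 * "knl_sk" and "MY_GRP" standing in for the caller's kernel socket and
 * group:
 *
 *	skb = nlmsg_new(payload, GFP_KERNEL);
 *	if (!skb) {
 *		netlink_set_err(knl_sk, 0, MY_GRP, -ENOBUFS);
 *		return;
 *	}
 */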
1188 
1189 /* must be called with netlink table grabbed */
1190 static void netlink_update_socket_mc(struct netlink_sock *nlk,
1191 				     unsigned int group,
1192 				     int is_new)
1193 {
1194 	int old, new = !!is_new, subscriptions;
1195 
1196 	old = test_bit(group - 1, nlk->groups);
1197 	subscriptions = nlk->subscriptions - old + new;
1198 	if (new)
1199 		__set_bit(group - 1, nlk->groups);
1200 	else
1201 		__clear_bit(group - 1, nlk->groups);
1202 	netlink_update_subscriptions(&nlk->sk, subscriptions);
1203 	netlink_update_listeners(&nlk->sk);
1204 }
1205 
1206 static int netlink_setsockopt(struct socket *sock, int level, int optname,
1207 			      char __user *optval, unsigned int optlen)
1208 {
1209 	struct sock *sk = sock->sk;
1210 	struct netlink_sock *nlk = nlk_sk(sk);
1211 	unsigned int val = 0;
1212 	int err;
1213 
1214 	if (level != SOL_NETLINK)
1215 		return -ENOPROTOOPT;
1216 
1217 	if (optlen >= sizeof(int) &&
1218 	    get_user(val, (unsigned int __user *)optval))
1219 		return -EFAULT;
1220 
1221 	switch (optname) {
1222 	case NETLINK_PKTINFO:
1223 		if (val)
1224 			nlk->flags |= NETLINK_RECV_PKTINFO;
1225 		else
1226 			nlk->flags &= ~NETLINK_RECV_PKTINFO;
1227 		err = 0;
1228 		break;
1229 	case NETLINK_ADD_MEMBERSHIP:
1230 	case NETLINK_DROP_MEMBERSHIP: {
1231 		if (!netlink_capable(sock, NL_NONROOT_RECV))
1232 			return -EPERM;
1233 		err = netlink_realloc_groups(sk);
1234 		if (err)
1235 			return err;
1236 		if (!val || val - 1 >= nlk->ngroups)
1237 			return -EINVAL;
1238 		netlink_table_grab();
1239 		netlink_update_socket_mc(nlk, val,
1240 					 optname == NETLINK_ADD_MEMBERSHIP);
1241 		netlink_table_ungrab();
1242 		err = 0;
1243 		break;
1244 	}
1245 	case NETLINK_BROADCAST_ERROR:
1246 		if (val)
1247 			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
1248 		else
1249 			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
1250 		err = 0;
1251 		break;
1252 	case NETLINK_NO_ENOBUFS:
1253 		if (val) {
1254 			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
1255 			clear_bit(0, &nlk->state);
1256 			wake_up_interruptible(&nlk->wait);
1257 		} else {
1258 			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
1259 		}
1260 		err = 0;
1261 		break;
1262 	default:
1263 		err = -ENOPROTOOPT;
1264 	}
1265 	return err;
1266 }
1267 
1268 static int netlink_getsockopt(struct socket *sock, int level, int optname,
1269 			      char __user *optval, int __user *optlen)
1270 {
1271 	struct sock *sk = sock->sk;
1272 	struct netlink_sock *nlk = nlk_sk(sk);
1273 	int len, val, err;
1274 
1275 	if (level != SOL_NETLINK)
1276 		return -ENOPROTOOPT;
1277 
1278 	if (get_user(len, optlen))
1279 		return -EFAULT;
1280 	if (len < 0)
1281 		return -EINVAL;
1282 
1283 	switch (optname) {
1284 	case NETLINK_PKTINFO:
1285 		if (len < sizeof(int))
1286 			return -EINVAL;
1287 		len = sizeof(int);
1288 		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
1289 		if (put_user(len, optlen) ||
1290 		    put_user(val, optval))
1291 			return -EFAULT;
1292 		err = 0;
1293 		break;
1294 	case NETLINK_BROADCAST_ERROR:
1295 		if (len < sizeof(int))
1296 			return -EINVAL;
1297 		len = sizeof(int);
1298 		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
1299 		if (put_user(len, optlen) ||
1300 		    put_user(val, optval))
1301 			return -EFAULT;
1302 		err = 0;
1303 		break;
1304 	case NETLINK_NO_ENOBUFS:
1305 		if (len < sizeof(int))
1306 			return -EINVAL;
1307 		len = sizeof(int);
1308 		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
1309 		if (put_user(len, optlen) ||
1310 		    put_user(val, optval))
1311 			return -EFAULT;
1312 		err = 0;
1313 		break;
1314 	default:
1315 		err = -ENOPROTOOPT;
1316 	}
1317 	return err;
1318 }
1319 
1320 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
1321 {
1322 	struct nl_pktinfo info;
1323 
1324 	info.group = NETLINK_CB(skb).dst_group;
1325 	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
1326 }
1327 
1328 static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1329 			   struct msghdr *msg, size_t len)
1330 {
1331 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1332 	struct sock *sk = sock->sk;
1333 	struct netlink_sock *nlk = nlk_sk(sk);
1334 	struct sockaddr_nl *addr = msg->msg_name;
1335 	u32 dst_pid;
1336 	u32 dst_group;
1337 	struct sk_buff *skb;
1338 	int err;
1339 	struct scm_cookie scm;
1340 
1341 	if (msg->msg_flags&MSG_OOB)
1342 		return -EOPNOTSUPP;
1343 
1344 	if (NULL == siocb->scm)
1345 		siocb->scm = &scm;
1346 
1347 	err = scm_send(sock, msg, siocb->scm);
1348 	if (err < 0)
1349 		return err;
1350 
1351 	if (msg->msg_namelen) {
1352 		err = -EINVAL;
1353 		if (addr->nl_family != AF_NETLINK)
1354 			goto out;
1355 		dst_pid = addr->nl_pid;
1356 		dst_group = ffs(addr->nl_groups);
1357 		err =  -EPERM;
1358 		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
1359 			goto out;
1360 	} else {
1361 		dst_pid = nlk->dst_pid;
1362 		dst_group = nlk->dst_group;
1363 	}
1364 
1365 	if (!nlk->pid) {
1366 		err = netlink_autobind(sock);
1367 		if (err)
1368 			goto out;
1369 	}
1370 
1371 	err = -EMSGSIZE;
1372 	if (len > sk->sk_sndbuf - 32)
1373 		goto out;
1374 	err = -ENOBUFS;
1375 	skb = alloc_skb(len, GFP_KERNEL);
1376 	if (skb == NULL)
1377 		goto out;
1378 
1379 	NETLINK_CB(skb).pid	= nlk->pid;
1380 	NETLINK_CB(skb).dst_group = dst_group;
1381 	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1382 
1383 	err = -EFAULT;
1384 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
1385 		kfree_skb(skb);
1386 		goto out;
1387 	}
1388 
1389 	err = security_netlink_send(sk, skb);
1390 	if (err) {
1391 		kfree_skb(skb);
1392 		goto out;
1393 	}
1394 
1395 	if (dst_group) {
1396 		atomic_inc(&skb->users);
1397 		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
1398 	}
1399 	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
1400 
1401 out:
1402 	scm_destroy(siocb->scm);
1403 	return err;
1404 }
1405 
1406 static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1407 			   struct msghdr *msg, size_t len,
1408 			   int flags)
1409 {
1410 	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1411 	struct scm_cookie scm;
1412 	struct sock *sk = sock->sk;
1413 	struct netlink_sock *nlk = nlk_sk(sk);
1414 	int noblock = flags&MSG_DONTWAIT;
1415 	size_t copied;
1416 	struct sk_buff *skb, *data_skb;
1417 	int err, ret;
1418 
1419 	if (flags&MSG_OOB)
1420 		return -EOPNOTSUPP;
1421 
1422 	copied = 0;
1423 
1424 	skb = skb_recv_datagram(sk, flags, noblock, &err);
1425 	if (skb == NULL)
1426 		goto out;
1427 
1428 	data_skb = skb;
1429 
1430 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1431 	if (unlikely(skb_shinfo(skb)->frag_list)) {
1432 		/*
1433 		 * If this skb has a frag_list, then here that means that we
1434 		 * will have to use the frag_list skb's data for compat tasks
1435 		 * and the regular skb's data for normal (non-compat) tasks.
1436 		 *
1437 		 * If we need to send the compat skb, assign it to the
1438 		 * 'data_skb' variable so that it will be used below for data
1439 		 * copying. We keep 'skb' for everything else, including
1440 		 * freeing both later.
1441 		 */
1442 		if (flags & MSG_CMSG_COMPAT)
1443 			data_skb = skb_shinfo(skb)->frag_list;
1444 	}
1445 #endif
1446 
1447 	msg->msg_namelen = 0;
1448 
1449 	copied = data_skb->len;
1450 	if (len < copied) {
1451 		msg->msg_flags |= MSG_TRUNC;
1452 		copied = len;
1453 	}
1454 
1455 	skb_reset_transport_header(data_skb);
1456 	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
1457 
1458 	if (msg->msg_name) {
1459 		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
1460 		addr->nl_family = AF_NETLINK;
1461 		addr->nl_pad    = 0;
1462 		addr->nl_pid	= NETLINK_CB(skb).pid;
1463 		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
1464 		msg->msg_namelen = sizeof(*addr);
1465 	}
1466 
1467 	if (nlk->flags & NETLINK_RECV_PKTINFO)
1468 		netlink_cmsg_recv_pktinfo(msg, skb);
1469 
1470 	if (NULL == siocb->scm) {
1471 		memset(&scm, 0, sizeof(scm));
1472 		siocb->scm = &scm;
1473 	}
1474 	siocb->scm->creds = *NETLINK_CREDS(skb);
1475 	if (flags & MSG_TRUNC)
1476 		copied = data_skb->len;
1477 
1478 	skb_free_datagram(sk, skb);
1479 
1480 	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1481 		ret = netlink_dump(sk);
1482 		if (ret) {
1483 			sk->sk_err = ret;
1484 			sk->sk_error_report(sk);
1485 		}
1486 	}
1487 
1488 	scm_recv(sock, msg, siocb->scm, flags);
1489 out:
1490 	netlink_rcv_wake(sk);
1491 	return err ? : copied;
1492 }
1493 
1494 static void netlink_data_ready(struct sock *sk, int len)
1495 {
1496 	BUG();
1497 }
1498 
1499 /*
1500  *	We export these functions to other modules. They provide
1501  *	complete, non-blocking kernel support for message
1502  *	queueing.
1503  */
1504 
1505 struct sock *
1506 netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1507 		      void (*input)(struct sk_buff *skb),
1508 		      struct mutex *cb_mutex, struct module *module)
1509 {
1510 	struct socket *sock;
1511 	struct sock *sk;
1512 	struct netlink_sock *nlk;
1513 	struct listeners *listeners = NULL;
1514 
1515 	BUG_ON(!nl_table);
1516 
1517 	if (unit < 0 || unit >= MAX_LINKS)
1518 		return NULL;
1519 
1520 	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1521 		return NULL;
1522 
1523 	/*
1524 	 * We have to hold just a reference on the net from sk, but must not
1525 	 * get_net() it. Besides, we cannot get and then put the net here.
1526 	 * So we create the socket inside init_net and then move it to net.
1527 	 */
1528 
1529 	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
1530 		goto out_sock_release_nosk;
1531 
1532 	sk = sock->sk;
1533 	sk_change_net(sk, net);
1534 
1535 	if (groups < 32)
1536 		groups = 32;
1537 
1538 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
1539 	if (!listeners)
1540 		goto out_sock_release;
1541 
1542 	sk->sk_data_ready = netlink_data_ready;
1543 	if (input)
1544 		nlk_sk(sk)->netlink_rcv = input;
1545 
1546 	if (netlink_insert(sk, net, 0))
1547 		goto out_sock_release;
1548 
1549 	nlk = nlk_sk(sk);
1550 	nlk->flags |= NETLINK_KERNEL_SOCKET;
1551 
1552 	netlink_table_grab();
1553 	if (!nl_table[unit].registered) {
1554 		nl_table[unit].groups = groups;
1555 		rcu_assign_pointer(nl_table[unit].listeners, listeners);
1556 		nl_table[unit].cb_mutex = cb_mutex;
1557 		nl_table[unit].module = module;
1558 		nl_table[unit].registered = 1;
1559 	} else {
1560 		kfree(listeners);
1561 		nl_table[unit].registered++;
1562 	}
1563 	netlink_table_ungrab();
1564 	return sk;
1565 
1566 out_sock_release:
1567 	kfree(listeners);
1568 	netlink_kernel_release(sk);
1569 	return NULL;
1570 
1571 out_sock_release_nosk:
1572 	sock_release(sock);
1573 	return NULL;
1574 }
1575 EXPORT_SYMBOL(netlink_kernel_create);
1576 
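/*
 * Minimal sketch of a kernel-side user of this API.  NETLINK_MYFAM,
 * MYFAM_MSG_ECHO and myfam_sk are hypothetical; a real family uses one of
 * the NETLINK_* protocol numbers from <linux/netlink.h> and its own
 * message types.  The input callback runs with the skb already charged to
 * the kernel socket, and netlink_rcv_skb() below walks the messages and
 * takes care of acking:
 *
 *	static struct sock *myfam_sk;
 *
 *	static int myfam_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		struct sk_buff *reply;
 *		struct nlmsghdr *rep;
 *		int err;
 *
 *		if (nlh->nlmsg_type != MYFAM_MSG_ECHO)
 *			return -EINVAL;
 *
 *		reply = nlmsg_new(nlmsg_len(nlh), GFP_KERNEL);
 *		if (!reply)
 *			return -ENOMEM;
 *		rep = nlmsg_put(reply, 0, nlh->nlmsg_seq, MYFAM_MSG_ECHO,
 *				nlmsg_len(nlh), 0);
 *		memcpy(nlmsg_data(rep), nlmsg_data(nlh), nlmsg_len(nlh));
 *		err = netlink_unicast(myfam_sk, reply, NETLINK_CB(skb).pid,
 *				      MSG_DONTWAIT);
 *		return err < 0 ? err : 0;
 *	}
 *
 *	static void myfam_rcv(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &myfam_doit);
 *	}
 *
 *	and, from the module's __init function:
 *
 *	myfam_sk = netlink_kernel_create(&init_net, NETLINK_MYFAM, 0,
 *					 myfam_rcv, NULL, THIS_MODULE);
 */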
1577 
1578 void
1579 netlink_kernel_release(struct sock *sk)
1580 {
1581 	sk_release_kernel(sk);
1582 }
1583 EXPORT_SYMBOL(netlink_kernel_release);
1584 
1585 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
1586 {
1587 	struct listeners *new, *old;
1588 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1589 
1590 	if (groups < 32)
1591 		groups = 32;
1592 
1593 	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
1594 		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
1595 		if (!new)
1596 			return -ENOMEM;
1597 		old = rcu_dereference_protected(tbl->listeners, 1);
1598 		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
1599 		rcu_assign_pointer(tbl->listeners, new);
1600 
1601 		kfree_rcu(old, rcu);
1602 	}
1603 	tbl->groups = groups;
1604 
1605 	return 0;
1606 }
1607 
1608 /**
1609  * netlink_change_ngroups - change number of multicast groups
1610  *
1611  * This changes the number of multicast groups that are available
1612  * on a certain netlink family. Note that it is not possible to
1613  * change the number of groups to below 32. Also note that it does
1614  * not implicitly call netlink_clear_multicast_users() when the
1615  * number of groups is reduced.
1616  *
1617  * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
1618  * @groups: The new number of groups.
1619  */
1620 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
1621 {
1622 	int err;
1623 
1624 	netlink_table_grab();
1625 	err = __netlink_change_ngroups(sk, groups);
1626 	netlink_table_ungrab();
1627 
1628 	return err;
1629 }
1630 
1631 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1632 {
1633 	struct sock *sk;
1634 	struct hlist_node *node;
1635 	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
1636 
1637 	sk_for_each_bound(sk, node, &tbl->mc_list)
1638 		netlink_update_socket_mc(nlk_sk(sk), group, 0);
1639 }
1640 
1641 /**
1642  * netlink_clear_multicast_users - kick multicast listeners out of a group
1643  *
1644  * This function removes all listeners from the given group.
1645  * @ksk: The kernel netlink socket, as returned by
1646  *	netlink_kernel_create().
1647  * @group: The multicast group to clear.
1648  */
1649 void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1650 {
1651 	netlink_table_grab();
1652 	__netlink_clear_multicast_users(ksk, group);
1653 	netlink_table_ungrab();
1654 }
1655 
1656 void netlink_set_nonroot(int protocol, unsigned int flags)
1657 {
1658 	if ((unsigned int)protocol < MAX_LINKS)
1659 		nl_table[protocol].nl_nonroot = flags;
1660 }
1661 EXPORT_SYMBOL(netlink_set_nonroot);
1662 
1663 struct nlmsghdr *
1664 __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
1665 {
1666 	struct nlmsghdr *nlh;
1667 	int size = NLMSG_LENGTH(len);
1668 
1669 	nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
1670 	nlh->nlmsg_type = type;
1671 	nlh->nlmsg_len = size;
1672 	nlh->nlmsg_flags = flags;
1673 	nlh->nlmsg_pid = pid;
1674 	nlh->nlmsg_seq = seq;
1675 	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
1676 		memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
1677 	return nlh;
1678 }
1679 EXPORT_SYMBOL(__nlmsg_put);
1680 
1681 /*
1682  * It looks a bit ugly.
1683  * It would be better to create a kernel thread.
1684  */
1685 
1686 static int netlink_dump(struct sock *sk)
1687 {
1688 	struct netlink_sock *nlk = nlk_sk(sk);
1689 	struct netlink_callback *cb;
1690 	struct sk_buff *skb = NULL;
1691 	struct nlmsghdr *nlh;
1692 	int len, err = -ENOBUFS;
1693 	int alloc_size;
1694 
1695 	mutex_lock(nlk->cb_mutex);
1696 
1697 	cb = nlk->cb;
1698 	if (cb == NULL) {
1699 		err = -EINVAL;
1700 		goto errout_skb;
1701 	}
1702 
1703 	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
1704 
1705 	skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
1706 	if (!skb)
1707 		goto errout_skb;
1708 
1709 	len = cb->dump(skb, cb);
1710 
1711 	if (len > 0) {
1712 		mutex_unlock(nlk->cb_mutex);
1713 
1714 		if (sk_filter(sk, skb))
1715 			kfree_skb(skb);
1716 		else
1717 			__netlink_sendskb(sk, skb);
1718 		return 0;
1719 	}
1720 
1721 	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
1722 	if (!nlh)
1723 		goto errout_skb;
1724 
1725 	nl_dump_check_consistent(cb, nlh);
1726 
1727 	memcpy(nlmsg_data(nlh), &len, sizeof(len));
1728 
1729 	if (sk_filter(sk, skb))
1730 		kfree_skb(skb);
1731 	else
1732 		__netlink_sendskb(sk, skb);
1733 
1734 	if (cb->done)
1735 		cb->done(cb);
1736 	nlk->cb = NULL;
1737 	mutex_unlock(nlk->cb_mutex);
1738 
1739 	netlink_consume_callback(cb);
1740 	return 0;
1741 
1742 errout_skb:
1743 	mutex_unlock(nlk->cb_mutex);
1744 	kfree_skb(skb);
1745 	return err;
1746 }
1747 
1748 int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1749 		       const struct nlmsghdr *nlh,
1750 		       struct netlink_dump_control *control)
1751 {
1752 	struct netlink_callback *cb;
1753 	struct sock *sk;
1754 	struct netlink_sock *nlk;
1755 	int ret;
1756 
1757 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1758 	if (cb == NULL)
1759 		return -ENOBUFS;
1760 
1761 	cb->dump = control->dump;
1762 	cb->done = control->done;
1763 	cb->nlh = nlh;
1764 	cb->data = control->data;
1765 	cb->min_dump_alloc = control->min_dump_alloc;
1766 	atomic_inc(&skb->users);
1767 	cb->skb = skb;
1768 
1769 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
1770 	if (sk == NULL) {
1771 		netlink_destroy_callback(cb);
1772 		return -ECONNREFUSED;
1773 	}
1774 	nlk = nlk_sk(sk);
1775 	/* A dump is in progress... */
1776 	mutex_lock(nlk->cb_mutex);
1777 	if (nlk->cb) {
1778 		mutex_unlock(nlk->cb_mutex);
1779 		netlink_destroy_callback(cb);
1780 		sock_put(sk);
1781 		return -EBUSY;
1782 	}
1783 	nlk->cb = cb;
1784 	mutex_unlock(nlk->cb_mutex);
1785 
1786 	ret = netlink_dump(sk);
1787 
1788 	sock_put(sk);
1789 
1790 	if (ret)
1791 		return ret;
1792 
1793 	/* We successfully started a dump; by returning -EINTR we
1794 	 * signal that no ACK should be sent even if one was requested.
1795 	 */
1796 	return -EINTR;
1797 }
1798 EXPORT_SYMBOL(netlink_dump_start);
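
/*
 * Sketch of how a request handler hands an NLM_F_DUMP request over to the
 * dump machinery ("myfam_dump" is a hypothetical netlink_callback->dump
 * implementation, "myfam_sk" the family's kernel socket).  The -EINTR
 * return value propagates through the doit callback back into
 * netlink_rcv_skb(), which then skips the automatic ack because the dump
 * itself produces the replies:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = myfam_dump,
 *		};
 *		return netlink_dump_start(myfam_sk, skb, nlh, &c);
 *	}
 */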
1799 
1800 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1801 {
1802 	struct sk_buff *skb;
1803 	struct nlmsghdr *rep;
1804 	struct nlmsgerr *errmsg;
1805 	size_t payload = sizeof(*errmsg);
1806 
1807 	/* error messages get the original request appended */
1808 	if (err)
1809 		payload += nlmsg_len(nlh);
1810 
1811 	skb = nlmsg_new(payload, GFP_KERNEL);
1812 	if (!skb) {
1813 		struct sock *sk;
1814 
1815 		sk = netlink_lookup(sock_net(in_skb->sk),
1816 				    in_skb->sk->sk_protocol,
1817 				    NETLINK_CB(in_skb).pid);
1818 		if (sk) {
1819 			sk->sk_err = ENOBUFS;
1820 			sk->sk_error_report(sk);
1821 			sock_put(sk);
1822 		}
1823 		return;
1824 	}
1825 
1826 	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1827 			  NLMSG_ERROR, payload, 0);
1828 	errmsg = nlmsg_data(rep);
1829 	errmsg->error = err;
1830 	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
1831 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
1832 }
1833 EXPORT_SYMBOL(netlink_ack);
1834 
1835 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1836 						     struct nlmsghdr *))
1837 {
1838 	struct nlmsghdr *nlh;
1839 	int err;
1840 
1841 	while (skb->len >= nlmsg_total_size(0)) {
1842 		int msglen;
1843 
1844 		nlh = nlmsg_hdr(skb);
1845 		err = 0;
1846 
1847 		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
1848 			return 0;
1849 
1850 		/* Only requests are handled by the kernel */
1851 		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
1852 			goto ack;
1853 
1854 		/* Skip control messages */
1855 		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
1856 			goto ack;
1857 
1858 		err = cb(skb, nlh);
1859 		if (err == -EINTR)
1860 			goto skip;
1861 
1862 ack:
1863 		if (nlh->nlmsg_flags & NLM_F_ACK || err)
1864 			netlink_ack(skb, nlh, err);
1865 
1866 skip:
1867 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
1868 		if (msglen > skb->len)
1869 			msglen = skb->len;
1870 		skb_pull(skb, msglen);
1871 	}
1872 
1873 	return 0;
1874 }
1875 EXPORT_SYMBOL(netlink_rcv_skb);
1876 
1877 /**
1878  * nlmsg_notify - send a notification netlink message
1879  * @sk: netlink socket to use
1880  * @skb: notification message
1881  * @pid: destination netlink pid for reports or 0
1882  * @group: destination multicast group or 0
1883  * @report: 1 to report back, 0 to disable
1884  * @flags: allocation flags
1885  */
1886 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
1887 		 unsigned int group, int report, gfp_t flags)
1888 {
1889 	int err = 0;
1890 
1891 	if (group) {
1892 		int exclude_pid = 0;
1893 
1894 		if (report) {
1895 			atomic_inc(&skb->users);
1896 			exclude_pid = pid;
1897 		}
1898 
1899 		/* errors are reported via the destination sk->sk_err, but delivery
1900 		 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
1901 		err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
1902 	}
1903 
1904 	if (report) {
1905 		int err2;
1906 
1907 		err2 = nlmsg_unicast(sk, skb, pid);
1908 		if (!err || err == -ESRCH)
1909 			err = err2;
1910 	}
1911 
1912 	return err;
1913 }
1914 EXPORT_SYMBOL(nlmsg_notify);
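
/*
 * Sketch of a typical caller ("myfam_sk" and MYFAM_GRP_NOTIFY are
 * placeholders): after building a notification skb the event is multicast
 * to the group and, if the original request carried NLM_F_ECHO
 * (nlmsg_report() returns 1 in that case), also unicast back to the
 * requester:
 *
 *	nlmsg_notify(myfam_sk, skb, NETLINK_CB(request_skb).pid,
 *		     MYFAM_GRP_NOTIFY, nlmsg_report(nlh), GFP_KERNEL);
 */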
1915 
1916 #ifdef CONFIG_PROC_FS
1917 struct nl_seq_iter {
1918 	struct seq_net_private p;
1919 	int link;
1920 	int hash_idx;
1921 };
1922 
1923 static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1924 {
1925 	struct nl_seq_iter *iter = seq->private;
1926 	int i, j;
1927 	struct sock *s;
1928 	struct hlist_node *node;
1929 	loff_t off = 0;
1930 
1931 	for (i = 0; i < MAX_LINKS; i++) {
1932 		struct nl_pid_hash *hash = &nl_table[i].hash;
1933 
1934 		for (j = 0; j <= hash->mask; j++) {
1935 			sk_for_each(s, node, &hash->table[j]) {
1936 				if (sock_net(s) != seq_file_net(seq))
1937 					continue;
1938 				if (off == pos) {
1939 					iter->link = i;
1940 					iter->hash_idx = j;
1941 					return s;
1942 				}
1943 				++off;
1944 			}
1945 		}
1946 	}
1947 	return NULL;
1948 }
1949 
1950 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
1951 	__acquires(nl_table_lock)
1952 {
1953 	read_lock(&nl_table_lock);
1954 	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1955 }
1956 
1957 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1958 {
1959 	struct sock *s;
1960 	struct nl_seq_iter *iter;
1961 	int i, j;
1962 
1963 	++*pos;
1964 
1965 	if (v == SEQ_START_TOKEN)
1966 		return netlink_seq_socket_idx(seq, 0);
1967 
1968 	iter = seq->private;
1969 	s = v;
1970 	do {
1971 		s = sk_next(s);
1972 	} while (s && sock_net(s) != seq_file_net(seq));
1973 	if (s)
1974 		return s;
1975 
1976 	i = iter->link;
1977 	j = iter->hash_idx + 1;
1978 
1979 	do {
1980 		struct nl_pid_hash *hash = &nl_table[i].hash;
1981 
1982 		for (; j <= hash->mask; j++) {
1983 			s = sk_head(&hash->table[j]);
1984 			while (s && sock_net(s) != seq_file_net(seq))
1985 				s = sk_next(s);
1986 			if (s) {
1987 				iter->link = i;
1988 				iter->hash_idx = j;
1989 				return s;
1990 			}
1991 		}
1992 
1993 		j = 0;
1994 	} while (++i < MAX_LINKS);
1995 
1996 	return NULL;
1997 }
1998 
1999 static void netlink_seq_stop(struct seq_file *seq, void *v)
2000 	__releases(nl_table_lock)
2001 {
2002 	read_unlock(&nl_table_lock);
2003 }
2004 
2005 
2006 static int netlink_seq_show(struct seq_file *seq, void *v)
2007 {
2008 	if (v == SEQ_START_TOKEN) {
2009 		seq_puts(seq,
2010 			 "sk       Eth Pid    Groups   "
2011 			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
2012 	} else {
2013 		struct sock *s = v;
2014 		struct netlink_sock *nlk = nlk_sk(s);
2015 
2016 		seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
2017 			   s,
2018 			   s->sk_protocol,
2019 			   nlk->pid,
2020 			   nlk->groups ? (u32)nlk->groups[0] : 0,
2021 			   sk_rmem_alloc_get(s),
2022 			   sk_wmem_alloc_get(s),
2023 			   nlk->cb,
2024 			   atomic_read(&s->sk_refcnt),
2025 			   atomic_read(&s->sk_drops),
2026 			   sock_i_ino(s)
2027 			);
2028 
2029 	}
2030 	return 0;
2031 }
2032 
2033 static const struct seq_operations netlink_seq_ops = {
2034 	.start  = netlink_seq_start,
2035 	.next   = netlink_seq_next,
2036 	.stop   = netlink_seq_stop,
2037 	.show   = netlink_seq_show,
2038 };
2039 
2040 
2041 static int netlink_seq_open(struct inode *inode, struct file *file)
2042 {
2043 	return seq_open_net(inode, file, &netlink_seq_ops,
2044 				sizeof(struct nl_seq_iter));
2045 }
2046 
2047 static const struct file_operations netlink_seq_fops = {
2048 	.owner		= THIS_MODULE,
2049 	.open		= netlink_seq_open,
2050 	.read		= seq_read,
2051 	.llseek		= seq_lseek,
2052 	.release	= seq_release_net,
2053 };
2054 
2055 #endif
2056 
2057 int netlink_register_notifier(struct notifier_block *nb)
2058 {
2059 	return atomic_notifier_chain_register(&netlink_chain, nb);
2060 }
2061 EXPORT_SYMBOL(netlink_register_notifier);
2062 
2063 int netlink_unregister_notifier(struct notifier_block *nb)
2064 {
2065 	return atomic_notifier_chain_unregister(&netlink_chain, nb);
2066 }
2067 EXPORT_SYMBOL(netlink_unregister_notifier);
2068 
2069 static const struct proto_ops netlink_ops = {
2070 	.family =	PF_NETLINK,
2071 	.owner =	THIS_MODULE,
2072 	.release =	netlink_release,
2073 	.bind =		netlink_bind,
2074 	.connect =	netlink_connect,
2075 	.socketpair =	sock_no_socketpair,
2076 	.accept =	sock_no_accept,
2077 	.getname =	netlink_getname,
2078 	.poll =		datagram_poll,
2079 	.ioctl =	sock_no_ioctl,
2080 	.listen =	sock_no_listen,
2081 	.shutdown =	sock_no_shutdown,
2082 	.setsockopt =	netlink_setsockopt,
2083 	.getsockopt =	netlink_getsockopt,
2084 	.sendmsg =	netlink_sendmsg,
2085 	.recvmsg =	netlink_recvmsg,
2086 	.mmap =		sock_no_mmap,
2087 	.sendpage =	sock_no_sendpage,
2088 };
2089 
2090 static const struct net_proto_family netlink_family_ops = {
2091 	.family = PF_NETLINK,
2092 	.create = netlink_create,
2093 	.owner	= THIS_MODULE,	/* for consistency 8) */
2094 };
2095 
2096 static int __net_init netlink_net_init(struct net *net)
2097 {
2098 #ifdef CONFIG_PROC_FS
2099 	if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
2100 		return -ENOMEM;
2101 #endif
2102 	return 0;
2103 }
2104 
2105 static void __net_exit netlink_net_exit(struct net *net)
2106 {
2107 #ifdef CONFIG_PROC_FS
2108 	proc_net_remove(net, "netlink");
2109 #endif
2110 }
2111 
2112 static void __init netlink_add_usersock_entry(void)
2113 {
2114 	struct listeners *listeners;
2115 	int groups = 32;
2116 
2117 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2118 	if (!listeners)
2119 		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2120 
2121 	netlink_table_grab();
2122 
2123 	nl_table[NETLINK_USERSOCK].groups = groups;
2124 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2125 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2126 	nl_table[NETLINK_USERSOCK].registered = 1;
2127 
2128 	netlink_table_ungrab();
2129 }
2130 
2131 static struct pernet_operations __net_initdata netlink_net_ops = {
2132 	.init = netlink_net_init,
2133 	.exit = netlink_net_exit,
2134 };
2135 
2136 static int __init netlink_proto_init(void)
2137 {
2138 	struct sk_buff *dummy_skb;
2139 	int i;
2140 	unsigned long limit;
2141 	unsigned int order;
2142 	int err = proto_register(&netlink_proto, 0);
2143 
2144 	if (err != 0)
2145 		goto out;
2146 
2147 	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));
2148 
2149 	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
2150 	if (!nl_table)
2151 		goto panic;
2152 
2153 	if (totalram_pages >= (128 * 1024))
2154 		limit = totalram_pages >> (21 - PAGE_SHIFT);
2155 	else
2156 		limit = totalram_pages >> (23 - PAGE_SHIFT);
2157 
2158 	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
2159 	limit = (1UL << order) / sizeof(struct hlist_head);
2160 	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
2161 
2162 	for (i = 0; i < MAX_LINKS; i++) {
2163 		struct nl_pid_hash *hash = &nl_table[i].hash;
2164 
2165 		hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
2166 		if (!hash->table) {
2167 			while (i-- > 0)
2168 				nl_pid_hash_free(nl_table[i].hash.table,
2169 						 1 * sizeof(*hash->table));
2170 			kfree(nl_table);
2171 			goto panic;
2172 		}
2173 		hash->max_shift = order;
2174 		hash->shift = 0;
2175 		hash->mask = 0;
2176 		hash->rehash_time = jiffies;
2177 	}
2178 
2179 	netlink_add_usersock_entry();
2180 
2181 	sock_register(&netlink_family_ops);
2182 	register_pernet_subsys(&netlink_net_ops);
2183 	/* The netlink device handler may be needed early. */
2184 	rtnetlink_init();
2185 out:
2186 	return err;
2187 panic:
2188 	panic("netlink_init: Cannot allocate nl_table\n");
2189 }
2190 
2191 core_initcall(netlink_proto_init);
2192