/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski        :       Module initialization cleanup.
 *         Ulises Alonso        :       Frame number limit removal and
 *                                      packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

#include "internal.h"

/*
   Assumptions:
   - if the device has no dev->hard_header routine, it adds and removes
     the ll header inside itself. In this case the ll header is invisible
     outside of the device, but higher levels still should reserve
     dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are not (PPP).
   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
		 ll header.  PPP does this, which is wrong, because it
		 introduces asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by the device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
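
/*
 * Illustration (not part of the kernel itself): the layout above is what
 * a user-space packet(7) socket observes. A minimal sketch, with error
 * handling omitted:
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <net/ethernet.h>
 *
 *	// SOCK_RAW: received data begins at the ll (e.g. Ethernet) header.
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	// SOCK_DGRAM: the ll header is already pulled; data begins at the
 *	// network header and the link-level info arrives out-of-band in
 *	// the struct sockaddr_ll filled in by recvfrom().
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 */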

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
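
/*
 * For context: user space passes the plain struct packet_mreq from
 * <linux/if_packet.h>; packet_mreq_max above merely gives the kernel room
 * for longer hardware addresses. A minimal sketch of the matching
 * setsockopt() call (assumed ifindex of 2; error handling omitted):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = 2,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */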

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);


#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

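/*
 * Worked example of the block layout macros (illustrative only): with
 * V3_ALIGNMENT == 8 and a hypothetical tp_sizeof_priv of 13 bytes,
 *
 *	BLK_PLUS_PRIV(13) == BLK_HDR_LEN + ALIGN(13, 8)
 *	                  == BLK_HDR_LEN + 16
 *
 * i.e. the private area is padded up to the next 8-byte boundary and the
 * first packet starts at BLOCK_O2FP(), right behind it.
 */
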
struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.   If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is this slow, you don't really
		 * need to worry about perf anyway.
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
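
/*
 * Worked example of the calculation above (illustrative, not normative):
 * for a 1MiB block on a 1Gbit/s link, msec = 1, div = 1000/1000 = 1 and
 * mbits = (1048576 * 8) / (1024 * 1024) = 8, giving tmo = 8 + 1 = 9ms,
 * slightly longer than the ~8ms the block needs to fill.  On a 10Gbit/s
 * link div = 10, so the integer division drives mbits to 0 and the
 * function degenerates to the 1ms minimum (tmo + 1).
 */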

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}
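
/*
 * The req3 fields consumed above come straight from user space. A minimal
 * TPACKET_V3 RX ring setup sketch (values are only an example and must
 * still pass packet_set_ring()'s sanity checks; error handling omitted):
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 20,	// 1MiB per block
 *		.tp_block_nr	   = 8,
 *		.tp_frame_size	   = 2048,
 *		.tp_frame_nr	   = ((1 << 20) / 2048) * 8,
 *		.tp_retire_blk_tov = 60,	// ms; 0 = derive from link speed
 *	};
 *	int ver = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */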

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. The queue was frozen, user-space
				 * caught up, now the link went idle && the
				 * timer fired. We don't have a block to close,
				 * so we open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {

		/* We could have just memset this, but we would lose the
		 * flexibility of making the priv area sticky.
		 */
		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
		BLOCK_NUM_PKTS(pbd1) = 0;
		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		getnstimeofday(&ts);
		h1->ts_first_pkt.ts_sec = ts.tv_sec;
		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
		pkc1->pkblk_start = (char *)pbd1;
		pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
		pbd1->version = pkc1->version;
		pkc1->prev = pkc1->nxt_offset;
		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
		prb_thaw_queue(pkc1);
		_prb_refresh_rx_retire_blk_timer(pkc1);

		smp_wmb();

		return;
	}

	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
		pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
	dump_stack();
	BUG();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}

	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
	dump_stack();
	BUG();
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd  = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes the caller holds sk->sk_receive_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if the last block, which caused the queue to freeze,
		 * is still in use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue;
			 * thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available; user-space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes the caller holds the sk_receive_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	bool has_room;

	if (po->prot_hook.func != tpacket_rcv)
		return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
			<= sk->sk_rcvbuf;

	spin_lock(&sk->sk_receive_queue.lock);
	if (po->tp_version == TPACKET_V3)
		has_room = prb_lookup_block(po, &po->rx_ring,
					    po->rx_ring.prb_bdqc.kactive_blk_num,
					    TP_STATUS_KERNEL);
	else
		has_room = packet_lookup_frame(po, &po->rx_ring,
					       po->rx_ring.head,
					       TP_STATUS_KERNEL);
	spin_unlock(&sk->sk_receive_queue.lock);

	return has_room;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return (((u64)skb->rxhash) * num) >> 32;
}
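
/*
 * The multiply-shift above maps the 32-bit rxhash uniformly onto
 * [0, num) without a modulo, since (hash * num) >> 32 is
 * floor(hash * num / 2^32). E.g. with num == 4, hashes below 2^30
 * select member 0, the next 2^30 select member 1, and so on.
 */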

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return cur;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, unsigned int skip,
					  unsigned int num)
{
	unsigned int i, j;

	i = j = min_t(int, f->next[idx], num - 1);
	do {
		if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
			if (i != j)
				f->next[idx] = i;
			return i;
		}
		if (++i == num)
			i = 0;
	} while (i != j);

	return idx;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
		break;
	}

	po = pkt_sk(f->arr[idx]);
	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
	    unlikely(!packet_rcv_has_room(po, skb))) {
		idx = fanout_demux_rollover(f, skb, idx, idx, num);
		po = pkt_sk(f->arr[idx]);
	}

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
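		/* fall through - a plain ROLLOVER request is valid and
		 * shares the checks below with the other modes.
		 */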
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}
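
/*
 * For context: user space joins a fanout group after binding the socket,
 * encoding the group id and mode in one integer. A minimal sketch
 * (group id 42, hash mode; error handling omitted):
 *
 *	int val = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *
 * Sockets requesting the same {id, netns} pair end up sharing the one
 * struct packet_fanout that the list walk above finds or creates.
 */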

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	mutex_lock(&fanout_mutex);
	po->fanout = NULL;

	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb),
	 *	so this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets from using up all the memory.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

static unsigned int run_filter(const struct sk_buff *skb,
				      const struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}
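
/*
 * The sk_filter consulted above is installed from user space with
 * SO_ATTACH_FILTER. A minimal classic-BPF sketch that accepts every
 * packet in full (error handling omitted):
 *
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	// accept, no trim
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * The filter's return value becomes 'res' here: 0 drops the packet and
 * smaller values trim it (see the snaplen handling in the callers).
 */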

/*
 * This function does lazy skb cloning in the hope that most of the packets
 * are discarded by BPF.
 *
 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return the skb to its original state on exit,
 * we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that the corresponding packet head is
		 * never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		struct tpacket3_hdr *h3;
		void *raw;
	} h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;
	struct timespec ts;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			po->tp_reserve;
		macoff = netoff - maclen;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb)
					skb_set_owner_r(copy_skb, sk);
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0)
				snaplen = 0;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (!h.raw)
		goto ring_is_full;
	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
		/*
		 * LOSING will be reported until you read the stats,
		 * because it's COR - Clear On Read.
		 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
		 * at the packet level.
		 */
		if (po->stats.tp_drops)
			status |= TP_STATUS_LOSING;
	}
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			status |= TP_STATUS_VLAN_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
		}
		h.h2->tp_padding = 0;
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_next_offset and vlan are already populated above,
		 * so DON'T clear those fields here.
		 */
1821 		h.h3->tp_status |= status;
1822 		h.h3->tp_len = skb->len;
1823 		h.h3->tp_snaplen = snaplen;
1824 		h.h3->tp_mac = macoff;
1825 		h.h3->tp_net = netoff;
1826 		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1827 				&& shhwtstamps->syststamp.tv64)
1828 			ts = ktime_to_timespec(shhwtstamps->syststamp);
1829 		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1830 				&& shhwtstamps->hwtstamp.tv64)
1831 			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1832 		else if (skb->tstamp.tv64)
1833 			ts = ktime_to_timespec(skb->tstamp);
1834 		else
1835 			getnstimeofday(&ts);
1836 		h.h3->tp_sec  = ts.tv_sec;
1837 		h.h3->tp_nsec = ts.tv_nsec;
1838 		hdrlen = sizeof(*h.h3);
1839 		break;
1840 	default:
1841 		BUG();
1842 	}
1843 
1844 	sll = h.raw + TPACKET_ALIGN(hdrlen);
1845 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1846 	sll->sll_family = AF_PACKET;
1847 	sll->sll_hatype = dev->type;
1848 	sll->sll_protocol = skb->protocol;
1849 	sll->sll_pkttype = skb->pkt_type;
1850 	if (unlikely(po->origdev))
1851 		sll->sll_ifindex = orig_dev->ifindex;
1852 	else
1853 		sll->sll_ifindex = dev->ifindex;
1854 
1855 	smp_mb();
1856 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
1857 	{
1858 		u8 *start, *end;
1859 
1860 		if (po->tp_version <= TPACKET_V2) {
1861 			end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1862 				+ macoff + snaplen);
1863 			for (start = h.raw; start < end; start += PAGE_SIZE)
1864 				flush_dcache_page(pgv_to_page(start));
1865 		}
1866 		smp_wmb();
1867 	}
1868 #endif
1869 	if (po->tp_version <= TPACKET_V2)
1870 		__packet_set_status(po, h.raw, status);
1871 	else
1872 		prb_clear_blk_fill_status(&po->rx_ring);
1873 
1874 	sk->sk_data_ready(sk, 0);
1875 
1876 drop_n_restore:
1877 	if (skb_head != skb->data && skb_shared(skb)) {
1878 		skb->data = skb_head;
1879 		skb->len = skb_len;
1880 	}
1881 drop:
1882 	kfree_skb(skb);
1883 	return 0;
1884 
1885 ring_is_full:
1886 	po->stats.tp_drops++;
1887 	spin_unlock(&sk->sk_receive_queue.lock);
1888 
1889 	sk->sk_data_ready(sk, 0);
1890 	kfree_skb(copy_skb);
1891 	goto drop_n_restore;
1892 }
1893 
1894 static void tpacket_destruct_skb(struct sk_buff *skb)
1895 {
1896 	struct packet_sock *po = pkt_sk(skb->sk);
1897 	void *ph;
1898 
1899 	if (likely(po->tx_ring.pg_vec)) {
1900 		ph = skb_shinfo(skb)->destructor_arg;
1901 		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1902 		atomic_dec(&po->tx_ring.pending);
1903 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1904 	}
1905 
1906 	sock_wfree(skb);
1907 }
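
/*
 * The destructor above completes the TX ring handshake: user space sets
 * TP_STATUS_SEND_REQUEST, tpacket_snd() moves the frame to
 * TP_STATUS_SENDING, and once the driver has freed the skb the frame
 * returns to TP_STATUS_AVAILABLE.  Illustrative user-space sketch (not
 * part of this file; 'ring' and 'framesz' are placeholder names,
 * TPACKET_V2 and <linux/if_packet.h> assumed):
 *
 *	static int frame_available(char *ring, unsigned int idx,
 *				   unsigned int framesz)
 *	{
 *		volatile struct tpacket2_hdr *hdr =
 *			(void *)(ring + idx * framesz);
 *
 *		return hdr->tp_status == TP_STATUS_AVAILABLE;
 *	}
 */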
1908 
1909 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1910 		void *frame, struct net_device *dev, int size_max,
1911 		__be16 proto, unsigned char *addr, int hlen)
1912 {
1913 	union {
1914 		struct tpacket_hdr *h1;
1915 		struct tpacket2_hdr *h2;
1916 		void *raw;
1917 	} ph;
1918 	int to_write, offset, len, tp_len, nr_frags, len_max;
1919 	struct socket *sock = po->sk.sk_socket;
1920 	struct page *page;
1921 	void *data;
1922 	int err;
1923 
1924 	ph.raw = frame;
1925 
1926 	skb->protocol = proto;
1927 	skb->dev = dev;
1928 	skb->priority = po->sk.sk_priority;
1929 	skb->mark = po->sk.sk_mark;
1930 	skb_shinfo(skb)->destructor_arg = ph.raw;
1931 
1932 	switch (po->tp_version) {
1933 	case TPACKET_V2:
1934 		tp_len = ph.h2->tp_len;
1935 		break;
1936 	default:
1937 		tp_len = ph.h1->tp_len;
1938 		break;
1939 	}
1940 	if (unlikely(tp_len > size_max)) {
1941 		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
1942 		return -EMSGSIZE;
1943 	}
1944 
1945 	skb_reserve(skb, hlen);
1946 	skb_reset_network_header(skb);
1947 	skb_probe_transport_header(skb, 0);
1948 
1949 	if (po->tp_tx_has_off) {
1950 		int off_min, off_max, off;
1951 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
1952 		off_max = po->tx_ring.frame_size - tp_len;
1953 		if (sock->type == SOCK_DGRAM) {
1954 			switch (po->tp_version) {
1955 			case TPACKET_V2:
1956 				off = ph.h2->tp_net;
1957 				break;
1958 			default:
1959 				off = ph.h1->tp_net;
1960 				break;
1961 			}
1962 		} else {
1963 			switch (po->tp_version) {
1964 			case TPACKET_V2:
1965 				off = ph.h2->tp_mac;
1966 				break;
1967 			default:
1968 				off = ph.h1->tp_mac;
1969 				break;
1970 			}
1971 		}
1972 		if (unlikely((off < off_min) || (off_max < off)))
1973 			return -EINVAL;
1974 		data = ph.raw + off;
1975 	} else {
1976 		data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1977 	}
1978 	to_write = tp_len;
1979 
1980 	if (sock->type == SOCK_DGRAM) {
1981 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
1982 				NULL, tp_len);
1983 		if (unlikely(err < 0))
1984 			return -EINVAL;
1985 	} else if (dev->hard_header_len) {
1986 		/* net device doesn't like empty head */
1987 		if (unlikely(tp_len <= dev->hard_header_len)) {
1988 			pr_err("packet size is too short (%d <= %d)\n",
1989 			       tp_len, dev->hard_header_len);
1990 			return -EINVAL;
1991 		}
1992 
1993 		skb_push(skb, dev->hard_header_len);
1994 		err = skb_store_bits(skb, 0, data,
1995 				dev->hard_header_len);
1996 		if (unlikely(err))
1997 			return err;
1998 
1999 		data += dev->hard_header_len;
2000 		to_write -= dev->hard_header_len;
2001 	}
2002 
2003 	offset = offset_in_page(data);
2004 	len_max = PAGE_SIZE - offset;
2005 	len = ((to_write > len_max) ? len_max : to_write);
2006 
2007 	skb->data_len = to_write;
2008 	skb->len += to_write;
2009 	skb->truesize += to_write;
2010 	atomic_add(to_write, &po->sk.sk_wmem_alloc);
2011 
2012 	while (likely(to_write)) {
2013 		nr_frags = skb_shinfo(skb)->nr_frags;
2014 
2015 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2016 			pr_err("packet exceeds the max number of skb frags (%lu)\n",
2017 			       MAX_SKB_FRAGS);
2018 			return -EFAULT;
2019 		}
2020 
2021 		page = pgv_to_page(data);
2022 		data += len;
2023 		flush_dcache_page(page);
2024 		get_page(page);
2025 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2026 		to_write -= len;
2027 		offset = 0;
2028 		len_max = PAGE_SIZE;
2029 		len = ((to_write > len_max) ? len_max : to_write);
2030 	}
2031 
2032 	return tp_len;
2033 }
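
/*
 * Frame layout seen by tpacket_fill_skb(): without PACKET_TX_HAS_OFF the
 * payload starts right after the ring header, at tp_hdrlen minus the
 * sockaddr_ll that only RX frames carry; with PACKET_TX_HAS_OFF, user
 * space may place it anywhere inside [off_min, off_max].  Illustrative
 * sketch of the default offset (TPACKET_V2 assumed; 'frame' is a
 * placeholder for a mapped TX frame):
 *
 *	struct tpacket2_hdr *hdr = frame;
 *	void *data = (char *)hdr + TPACKET2_HDRLEN
 *			- sizeof(struct sockaddr_ll);
 */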
2034 
2035 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2036 {
2037 	struct sk_buff *skb;
2038 	struct net_device *dev;
2039 	__be16 proto;
2040 	bool need_rls_dev = false;
2041 	int err, reserve = 0;
2042 	void *ph;
2043 	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2044 	int tp_len, size_max;
2045 	unsigned char *addr;
2046 	int len_sum = 0;
2047 	int status = TP_STATUS_AVAILABLE;
2048 	int hlen, tlen;
2049 
2050 	mutex_lock(&po->pg_vec_lock);
2051 
2052 	if (saddr == NULL) {
2053 		dev = po->prot_hook.dev;
2054 		proto	= po->num;
2055 		addr	= NULL;
2056 	} else {
2057 		err = -EINVAL;
2058 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2059 			goto out;
2060 		if (msg->msg_namelen < (saddr->sll_halen
2061 					+ offsetof(struct sockaddr_ll,
2062 						sll_addr)))
2063 			goto out;
2064 		proto	= saddr->sll_protocol;
2065 		addr	= saddr->sll_addr;
2066 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2067 		need_rls_dev = true;
2068 	}
2069 
2070 	err = -ENXIO;
2071 	if (unlikely(dev == NULL))
2072 		goto out;
2073 
2074 	reserve = dev->hard_header_len;
2075 
2076 	err = -ENETDOWN;
2077 	if (unlikely(!(dev->flags & IFF_UP)))
2078 		goto out_put;
2079 
2080 	size_max = po->tx_ring.frame_size
2081 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2082 
2083 	if (size_max > dev->mtu + reserve)
2084 		size_max = dev->mtu + reserve;
2085 
2086 	do {
2087 		ph = packet_current_frame(po, &po->tx_ring,
2088 				TP_STATUS_SEND_REQUEST);
2089 
2090 		if (unlikely(ph == NULL)) {
2091 			schedule();
2092 			continue;
2093 		}
2094 
2095 		status = TP_STATUS_SEND_REQUEST;
2096 		hlen = LL_RESERVED_SPACE(dev);
2097 		tlen = dev->needed_tailroom;
2098 		skb = sock_alloc_send_skb(&po->sk,
2099 				hlen + tlen + sizeof(struct sockaddr_ll),
2100 				0, &err);
2101 
2102 		if (unlikely(skb == NULL))
2103 			goto out_status;
2104 
2105 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2106 				addr, hlen);
2107 
2108 		if (unlikely(tp_len < 0)) {
2109 			if (po->tp_loss) {
2110 				__packet_set_status(po, ph,
2111 						TP_STATUS_AVAILABLE);
2112 				packet_increment_head(&po->tx_ring);
2113 				kfree_skb(skb);
2114 				continue;
2115 			} else {
2116 				status = TP_STATUS_WRONG_FORMAT;
2117 				err = tp_len;
2118 				goto out_status;
2119 			}
2120 		}
2121 
2122 		skb->destructor = tpacket_destruct_skb;
2123 		__packet_set_status(po, ph, TP_STATUS_SENDING);
2124 		atomic_inc(&po->tx_ring.pending);
2125 
2126 		status = TP_STATUS_SEND_REQUEST;
2127 		err = dev_queue_xmit(skb);
2128 		if (unlikely(err > 0)) {
2129 			err = net_xmit_errno(err);
2130 			if (err && __packet_get_status(po, ph) ==
2131 				   TP_STATUS_AVAILABLE) {
2132 				/* skb was destructed already */
2133 				skb = NULL;
2134 				goto out_status;
2135 			}
2136 			/*
2137 			 * skb was dropped but not destructed yet;
2138 			 * let's treat it like congestion or err < 0
2139 			 */
2140 			err = 0;
2141 		}
2142 		packet_increment_head(&po->tx_ring);
2143 		len_sum += tp_len;
2144 	} while (likely((ph != NULL) ||
2145 			((!(msg->msg_flags & MSG_DONTWAIT)) &&
2146 			 (atomic_read(&po->tx_ring.pending))))
2147 		);
2148 
2149 	err = len_sum;
2150 	goto out_put;
2151 
2152 out_status:
2153 	__packet_set_status(po, ph, status);
2154 	kfree_skb(skb);
2155 out_put:
2156 	if (need_rls_dev)
2157 		dev_put(dev);
2158 out:
2159 	mutex_unlock(&po->pg_vec_lock);
2160 	return err;
2161 }
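
/*
 * Matching user-space transmit loop for tpacket_snd() (illustrative
 * sketch, not kernel API; 'ring', 'framesz' and 'i' are placeholders,
 * TPACKET_V2 assumed):
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + i * framesz);
 *	void *data = (char *)hdr + TPACKET2_HDRLEN
 *			- sizeof(struct sockaddr_ll);
 *
 *	memcpy(data, pkt, pkt_len);	// raw frame incl. link header
 *	hdr->tp_len = pkt_len;
 *	__sync_synchronize();		// publish data before status
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	sendto(fd, NULL, 0, 0, NULL, 0); // kicks the do/while loop above
 */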
2162 
2163 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2164 				        size_t reserve, size_t len,
2165 				        size_t linear, int noblock,
2166 				        int *err)
2167 {
2168 	struct sk_buff *skb;
2169 
2170 	/* Under a page?  Don't bother with paged skb. */
2171 	if (prepad + len < PAGE_SIZE || !linear)
2172 		linear = len;
2173 
2174 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2175 				   err);
2176 	if (!skb)
2177 		return NULL;
2178 
2179 	skb_reserve(skb, reserve);
2180 	skb_put(skb, linear);
2181 	skb->data_len = len - linear;
2182 	skb->len += len - linear;
2183 
2184 	return skb;
2185 }
2186 
2187 static int packet_snd(struct socket *sock,
2188 			  struct msghdr *msg, size_t len)
2189 {
2190 	struct sock *sk = sock->sk;
2191 	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2192 	struct sk_buff *skb;
2193 	struct net_device *dev;
2194 	__be16 proto;
2195 	bool need_rls_dev = false;
2196 	unsigned char *addr;
2197 	int err, reserve = 0;
2198 	struct virtio_net_hdr vnet_hdr = { 0 };
2199 	int offset = 0;
2200 	int vnet_hdr_len;
2201 	struct packet_sock *po = pkt_sk(sk);
2202 	unsigned short gso_type = 0;
2203 	int hlen, tlen;
2204 	int extra_len = 0;
2205 
2206 	/*
2207 	 *	Get and verify the address.
2208 	 */
2209 
2210 	if (saddr == NULL) {
2211 		dev = po->prot_hook.dev;
2212 		proto	= po->num;
2213 		addr	= NULL;
2214 	} else {
2215 		err = -EINVAL;
2216 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2217 			goto out;
2218 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2219 			goto out;
2220 		proto	= saddr->sll_protocol;
2221 		addr	= saddr->sll_addr;
2222 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2223 		need_rls_dev = true;
2224 	}
2225 
2226 	err = -ENXIO;
2227 	if (dev == NULL)
2228 		goto out_unlock;
2229 	if (sock->type == SOCK_RAW)
2230 		reserve = dev->hard_header_len;
2231 
2232 	err = -ENETDOWN;
2233 	if (!(dev->flags & IFF_UP))
2234 		goto out_unlock;
2235 
2236 	if (po->has_vnet_hdr) {
2237 		vnet_hdr_len = sizeof(vnet_hdr);
2238 
2239 		err = -EINVAL;
2240 		if (len < vnet_hdr_len)
2241 			goto out_unlock;
2242 
2243 		len -= vnet_hdr_len;
2244 
2245 		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2246 				       vnet_hdr_len);
2247 		if (err < 0)
2248 			goto out_unlock;
2249 
2250 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2251 		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2252 		      vnet_hdr.hdr_len))
2253 			vnet_hdr.hdr_len = vnet_hdr.csum_start +
2254 						 vnet_hdr.csum_offset + 2;
2255 
2256 		err = -EINVAL;
2257 		if (vnet_hdr.hdr_len > len)
2258 			goto out_unlock;
2259 
2260 		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2261 			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2262 			case VIRTIO_NET_HDR_GSO_TCPV4:
2263 				gso_type = SKB_GSO_TCPV4;
2264 				break;
2265 			case VIRTIO_NET_HDR_GSO_TCPV6:
2266 				gso_type = SKB_GSO_TCPV6;
2267 				break;
2268 			case VIRTIO_NET_HDR_GSO_UDP:
2269 				gso_type = SKB_GSO_UDP;
2270 				break;
2271 			default:
2272 				goto out_unlock;
2273 			}
2274 
2275 			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2276 				gso_type |= SKB_GSO_TCP_ECN;
2277 
2278 			if (vnet_hdr.gso_size == 0)
2279 				goto out_unlock;
2280 
2281 		}
2282 	}
2283 
2284 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2285 		if (!netif_supports_nofcs(dev)) {
2286 			err = -EPROTONOSUPPORT;
2287 			goto out_unlock;
2288 		}
2289 		extra_len = 4; /* We're doing our own CRC */
2290 	}
2291 
2292 	err = -EMSGSIZE;
2293 	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2294 		goto out_unlock;
2295 
2296 	err = -ENOBUFS;
2297 	hlen = LL_RESERVED_SPACE(dev);
2298 	tlen = dev->needed_tailroom;
2299 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
2300 			       msg->msg_flags & MSG_DONTWAIT, &err);
2301 	if (skb == NULL)
2302 		goto out_unlock;
2303 
2304 	skb_set_network_header(skb, reserve);
2305 
2306 	err = -EINVAL;
2307 	if (sock->type == SOCK_DGRAM &&
2308 	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
2309 		goto out_free;
2310 
2311 	/* Returns -EFAULT on error */
2312 	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
2313 	if (err)
2314 		goto out_free;
2315 	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2316 	if (err < 0)
2317 		goto out_free;
2318 
2319 	if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2320 		/* Earlier code assumed this would be a VLAN pkt,
2321 		 * double-check this now that we have the actual
2322 		 * packet in hand.
2323 		 */
2324 		struct ethhdr *ehdr;
2325 		skb_reset_mac_header(skb);
2326 		ehdr = eth_hdr(skb);
2327 		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2328 			err = -EMSGSIZE;
2329 			goto out_free;
2330 		}
2331 	}
2332 
2333 	skb->protocol = proto;
2334 	skb->dev = dev;
2335 	skb->priority = sk->sk_priority;
2336 	skb->mark = sk->sk_mark;
2337 
2338 	if (po->has_vnet_hdr) {
2339 		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2340 			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2341 						  vnet_hdr.csum_offset)) {
2342 				err = -EINVAL;
2343 				goto out_free;
2344 			}
2345 		}
2346 
2347 		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2348 		skb_shinfo(skb)->gso_type = gso_type;
2349 
2350 		/* Header must be checked, and gso_segs computed. */
2351 		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2352 		skb_shinfo(skb)->gso_segs = 0;
2353 
2354 		len += vnet_hdr_len;
2355 	}
2356 
2357 	skb_probe_transport_header(skb, reserve);
2358 
2359 	if (unlikely(extra_len == 4))
2360 		skb->no_fcs = 1;
2361 
2362 	/*
2363 	 *	Now send it
2364 	 */
2365 
2366 	err = dev_queue_xmit(skb);
2367 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2368 		goto out_unlock;
2369 
2370 	if (need_rls_dev)
2371 		dev_put(dev);
2372 
2373 	return len;
2374 
2375 out_free:
2376 	kfree_skb(skb);
2377 out_unlock:
2378 	if (dev && need_rls_dev)
2379 		dev_put(dev);
2380 out:
2381 	return err;
2382 }
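
/*
 * The non-ring path above is an ordinary sendto() on the socket.
 * Illustrative user-space sketch (SOCK_RAW, so the buffer must start
 * with the link-layer header; sll_addr is only consulted for SOCK_DGRAM,
 * where the kernel builds the header via dev_hard_header()):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *		.sll_halen    = ETH_ALEN,
 *	};
 *
 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);  // dst_mac: placeholder
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */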
2383 
2384 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2385 		struct msghdr *msg, size_t len)
2386 {
2387 	struct sock *sk = sock->sk;
2388 	struct packet_sock *po = pkt_sk(sk);
2389 	if (po->tx_ring.pg_vec)
2390 		return tpacket_snd(po, msg);
2391 	else
2392 		return packet_snd(sock, msg, len);
2393 }
2394 
2395 /*
2396  *	Close a PACKET socket. This is fairly simple. We immediately go
2397  *	to 'closed' state and remove our protocol entry in the device list.
2398  */
2399 
2400 static int packet_release(struct socket *sock)
2401 {
2402 	struct sock *sk = sock->sk;
2403 	struct packet_sock *po;
2404 	struct net *net;
2405 	union tpacket_req_u req_u;
2406 
2407 	if (!sk)
2408 		return 0;
2409 
2410 	net = sock_net(sk);
2411 	po = pkt_sk(sk);
2412 
2413 	mutex_lock(&net->packet.sklist_lock);
2414 	sk_del_node_init_rcu(sk);
2415 	mutex_unlock(&net->packet.sklist_lock);
2416 
2417 	preempt_disable();
2418 	sock_prot_inuse_add(net, sk->sk_prot, -1);
2419 	preempt_enable();
2420 
2421 	spin_lock(&po->bind_lock);
2422 	unregister_prot_hook(sk, false);
2423 	if (po->prot_hook.dev) {
2424 		dev_put(po->prot_hook.dev);
2425 		po->prot_hook.dev = NULL;
2426 	}
2427 	spin_unlock(&po->bind_lock);
2428 
2429 	packet_flush_mclist(sk);
2430 
2431 	if (po->rx_ring.pg_vec) {
2432 		memset(&req_u, 0, sizeof(req_u));
2433 		packet_set_ring(sk, &req_u, 1, 0);
2434 	}
2435 
2436 	if (po->tx_ring.pg_vec) {
2437 		memset(&req_u, 0, sizeof(req_u));
2438 		packet_set_ring(sk, &req_u, 1, 1);
2439 	}
2440 
2441 	fanout_release(sk);
2442 
2443 	synchronize_net();
2444 	/*
2445 	 *	Now the socket is dead. No more input will appear.
2446 	 */
2447 	sock_orphan(sk);
2448 	sock->sk = NULL;
2449 
2450 	/* Purge queues */
2451 
2452 	skb_queue_purge(&sk->sk_receive_queue);
2453 	sk_refcnt_debug_release(sk);
2454 
2455 	sock_put(sk);
2456 	return 0;
2457 }
2458 
2459 /*
2460  *	Attach a packet hook.
2461  */
2462 
2463 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
2464 {
2465 	struct packet_sock *po = pkt_sk(sk);
2466 
2467 	if (po->fanout) {
2468 		if (dev)
2469 			dev_put(dev);
2470 
2471 		return -EINVAL;
2472 	}
2473 
2474 	lock_sock(sk);
2475 
2476 	spin_lock(&po->bind_lock);
2477 	unregister_prot_hook(sk, true);
2478 	po->num = protocol;
2479 	po->prot_hook.type = protocol;
2480 	if (po->prot_hook.dev)
2481 		dev_put(po->prot_hook.dev);
2482 	po->prot_hook.dev = dev;
2483 
2484 	po->ifindex = dev ? dev->ifindex : 0;
2485 
2486 	if (protocol == 0)
2487 		goto out_unlock;
2488 
2489 	if (!dev || (dev->flags & IFF_UP)) {
2490 		register_prot_hook(sk);
2491 	} else {
2492 		sk->sk_err = ENETDOWN;
2493 		if (!sock_flag(sk, SOCK_DEAD))
2494 			sk->sk_error_report(sk);
2495 	}
2496 
2497 out_unlock:
2498 	spin_unlock(&po->bind_lock);
2499 	release_sock(sk);
2500 	return 0;
2501 }
2502 
2503 /*
2504  *	Bind a packet socket to a device
2505  */
2506 
2507 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2508 			    int addr_len)
2509 {
2510 	struct sock *sk = sock->sk;
2511 	char name[15];
2512 	struct net_device *dev;
2513 	int err = -ENODEV;
2514 
2515 	/*
2516 	 *	Check legality
2517 	 */
2518 
2519 	if (addr_len != sizeof(struct sockaddr))
2520 		return -EINVAL;
2521 	strlcpy(name, uaddr->sa_data, sizeof(name));
2522 
2523 	dev = dev_get_by_name(sock_net(sk), name);
2524 	if (dev)
2525 		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
2526 	return err;
2527 }
2528 
2529 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2530 {
2531 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2532 	struct sock *sk = sock->sk;
2533 	struct net_device *dev = NULL;
2534 	int err;
2535 
2536 
2537 	/*
2538 	 *	Check legality
2539 	 */
2540 
2541 	if (addr_len < sizeof(struct sockaddr_ll))
2542 		return -EINVAL;
2543 	if (sll->sll_family != AF_PACKET)
2544 		return -EINVAL;
2545 
2546 	if (sll->sll_ifindex) {
2547 		err = -ENODEV;
2548 		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
2549 		if (dev == NULL)
2550 			goto out;
2551 	}
2552 	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
2553 
2554 out:
2555 	return err;
2556 }
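
/*
 * Illustrative user-space counterpart of packet_bind() (a sketch, not
 * part of this file): attach the socket to one interface and protocol.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */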
2557 
2558 static struct proto packet_proto = {
2559 	.name	  = "PACKET",
2560 	.owner	  = THIS_MODULE,
2561 	.obj_size = sizeof(struct packet_sock),
2562 };
2563 
2564 /*
2565  *	Create a packet socket (SOCK_RAW, SOCK_DGRAM, or the legacy SOCK_PACKET).
2566  */
2567 
2568 static int packet_create(struct net *net, struct socket *sock, int protocol,
2569 			 int kern)
2570 {
2571 	struct sock *sk;
2572 	struct packet_sock *po;
2573 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
2574 	int err;
2575 
2576 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
2577 		return -EPERM;
2578 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2579 	    sock->type != SOCK_PACKET)
2580 		return -ESOCKTNOSUPPORT;
2581 
2582 	sock->state = SS_UNCONNECTED;
2583 
2584 	err = -ENOBUFS;
2585 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
2586 	if (sk == NULL)
2587 		goto out;
2588 
2589 	sock->ops = &packet_ops;
2590 	if (sock->type == SOCK_PACKET)
2591 		sock->ops = &packet_ops_spkt;
2592 
2593 	sock_init_data(sock, sk);
2594 
2595 	po = pkt_sk(sk);
2596 	sk->sk_family = PF_PACKET;
2597 	po->num = proto;
2598 
2599 	sk->sk_destruct = packet_sock_destruct;
2600 	sk_refcnt_debug_inc(sk);
2601 
2602 	/*
2603 	 *	Attach a protocol block
2604 	 */
2605 
2606 	spin_lock_init(&po->bind_lock);
2607 	mutex_init(&po->pg_vec_lock);
2608 	po->prot_hook.func = packet_rcv;
2609 
2610 	if (sock->type == SOCK_PACKET)
2611 		po->prot_hook.func = packet_rcv_spkt;
2612 
2613 	po->prot_hook.af_packet_priv = sk;
2614 
2615 	if (proto) {
2616 		po->prot_hook.type = proto;
2617 		register_prot_hook(sk);
2618 	}
2619 
2620 	mutex_lock(&net->packet.sklist_lock);
2621 	sk_add_node_rcu(sk, &net->packet.sklist);
2622 	mutex_unlock(&net->packet.sklist_lock);
2623 
2624 	preempt_disable();
2625 	sock_prot_inuse_add(net, &packet_proto, 1);
2626 	preempt_enable();
2627 
2628 	return 0;
2629 out:
2630 	return err;
2631 }
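
/*
 * User-space entry point for packet_create() (illustrative sketch;
 * requires CAP_NET_RAW, and note the protocol argument is passed in
 * network byte order as documented above):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	if (fd < 0)
 *		perror("socket");
 */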
2632 
2633 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2634 {
2635 	struct sock_exterr_skb *serr;
2636 	struct sk_buff *skb, *skb2;
2637 	int copied, err;
2638 
2639 	err = -EAGAIN;
2640 	skb = skb_dequeue(&sk->sk_error_queue);
2641 	if (skb == NULL)
2642 		goto out;
2643 
2644 	copied = skb->len;
2645 	if (copied > len) {
2646 		msg->msg_flags |= MSG_TRUNC;
2647 		copied = len;
2648 	}
2649 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2650 	if (err)
2651 		goto out_free_skb;
2652 
2653 	sock_recv_timestamp(msg, sk, skb);
2654 
2655 	serr = SKB_EXT_ERR(skb);
2656 	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2657 		 sizeof(serr->ee), &serr->ee);
2658 
2659 	msg->msg_flags |= MSG_ERRQUEUE;
2660 	err = copied;
2661 
2662 	/* Reset and regenerate socket error */
2663 	spin_lock_bh(&sk->sk_error_queue.lock);
2664 	sk->sk_err = 0;
2665 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2666 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2667 		spin_unlock_bh(&sk->sk_error_queue.lock);
2668 		sk->sk_error_report(sk);
2669 	} else
2670 		spin_unlock_bh(&sk->sk_error_queue.lock);
2671 
2672 out_free_skb:
2673 	kfree_skb(skb);
2674 out:
2675 	return err;
2676 }
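
/*
 * Draining the error queue from user space (illustrative sketch): each
 * queued skb carries a sock_extended_err, delivered as a
 * PACKET_TX_TIMESTAMP control message by the function above.
 *
 *	char ctrl[CMSG_SPACE(sizeof(struct sock_extended_err))];
 *	struct msghdr msg = {
 *		.msg_control    = ctrl,
 *		.msg_controllen = sizeof(ctrl),
 *	};
 *	struct cmsghdr *cm;
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0)
 *		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *			if (cm->cmsg_level == SOL_PACKET &&
 *			    cm->cmsg_type == PACKET_TX_TIMESTAMP)
 *				;	// sock_extended_err at CMSG_DATA(cm)
 */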
2677 
2678 /*
2679  *	Pull a packet from our receive queue and hand it to the user.
2680  *	If necessary we block.
2681  */
2682 
2683 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2684 			  struct msghdr *msg, size_t len, int flags)
2685 {
2686 	struct sock *sk = sock->sk;
2687 	struct sk_buff *skb;
2688 	int copied, err;
2689 	struct sockaddr_ll *sll;
2690 	int vnet_hdr_len = 0;
2691 
2692 	err = -EINVAL;
2693 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
2694 		goto out;
2695 
2696 #if 0
2697 	/* What error should we return now? EUNATTACH? */
2698 	if (pkt_sk(sk)->ifindex < 0)
2699 		return -ENODEV;
2700 #endif
2701 
2702 	if (flags & MSG_ERRQUEUE) {
2703 		err = packet_recv_error(sk, msg, len);
2704 		goto out;
2705 	}
2706 
2707 	/*
2708 	 *	Call the generic datagram receiver. This handles all sorts
2709 	 *	of horrible races and re-entrancy so we can forget about it
2710 	 *	in the protocol layers.
2711 	 *
2712 	 *	Now it will return ENETDOWN if the device has just gone
2713 	 *	down, but then it will block.
2714 	 */
2715 
2716 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2717 
2718 	/*
2719 	 *	An error occurred, so return it. Because skb_recv_datagram()
2720 	 *	handles the blocking, we don't need to see or worry about
2721 	 *	blocking retries.
2722 	 */
2723 
2724 	if (skb == NULL)
2725 		goto out;
2726 
2727 	if (pkt_sk(sk)->has_vnet_hdr) {
2728 		struct virtio_net_hdr vnet_hdr = { 0 };
2729 
2730 		err = -EINVAL;
2731 		vnet_hdr_len = sizeof(vnet_hdr);
2732 		if (len < vnet_hdr_len)
2733 			goto out_free;
2734 
2735 		len -= vnet_hdr_len;
2736 
2737 		if (skb_is_gso(skb)) {
2738 			struct skb_shared_info *sinfo = skb_shinfo(skb);
2739 
2740 			/* This is a hint as to how much should be linear. */
2741 			vnet_hdr.hdr_len = skb_headlen(skb);
2742 			vnet_hdr.gso_size = sinfo->gso_size;
2743 			if (sinfo->gso_type & SKB_GSO_TCPV4)
2744 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2745 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
2746 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2747 			else if (sinfo->gso_type & SKB_GSO_UDP)
2748 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2749 			else if (sinfo->gso_type & SKB_GSO_FCOE)
2750 				goto out_free;
2751 			else
2752 				BUG();
2753 			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2754 				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2755 		} else
2756 			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2757 
2758 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2759 			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
2760 			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
2761 			vnet_hdr.csum_offset = skb->csum_offset;
2762 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2763 			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
2764 		} /* else everything is zero */
2765 
2766 		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2767 				     vnet_hdr_len);
2768 		if (err < 0)
2769 			goto out_free;
2770 	}
2771 
2772 	/*
2773 	 *	If the address length field is there to be filled in, we fill
2774 	 *	it in now.
2775 	 */
2776 
2777 	sll = &PACKET_SKB_CB(skb)->sa.ll;
2778 	if (sock->type == SOCK_PACKET)
2779 		msg->msg_namelen = sizeof(struct sockaddr_pkt);
2780 	else
2781 		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2782 
2783 	/*
2784 	 *	You lose any data beyond the buffer you gave. If it worries a
2785 	 *	user program they can ask the device for its MTU anyway.
2786 	 */
2787 
2788 	copied = skb->len;
2789 	if (copied > len) {
2790 		copied = len;
2791 		msg->msg_flags |= MSG_TRUNC;
2792 	}
2793 
2794 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2795 	if (err)
2796 		goto out_free;
2797 
2798 	sock_recv_ts_and_drops(msg, sk, skb);
2799 
2800 	if (msg->msg_name)
2801 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2802 		       msg->msg_namelen);
2803 
2804 	if (pkt_sk(sk)->auxdata) {
2805 		struct tpacket_auxdata aux;
2806 
2807 		aux.tp_status = TP_STATUS_USER;
2808 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2809 			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2810 		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2811 		aux.tp_snaplen = skb->len;
2812 		aux.tp_mac = 0;
2813 		aux.tp_net = skb_network_offset(skb);
2814 		if (vlan_tx_tag_present(skb)) {
2815 			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2816 			aux.tp_status |= TP_STATUS_VLAN_VALID;
2817 		} else {
2818 			aux.tp_vlan_tci = 0;
2819 		}
2820 		aux.tp_padding = 0;
2821 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
2822 	}
2823 
2824 	/*
2825 	 *	Free or return the buffer as appropriate. Again this
2826 	 *	hides all the races and re-entrancy issues from us.
2827 	 */
2828 	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
2829 
2830 out_free:
2831 	skb_free_datagram(sk, skb);
2832 out:
2833 	return err;
2834 }
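
/*
 * Consuming PACKET_AUXDATA in user space (illustrative sketch): enable
 * the option once via setsockopt(), then parse the control message on
 * every receive.
 *
 *	char buf[2048], ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov        = &iov,
 *		.msg_iovlen     = 1,
 *		.msg_control    = ctrl,
 *		.msg_controllen = sizeof(ctrl),
 *	};
 *	struct cmsghdr *cm;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_PACKET &&
 *		    cm->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cm);
 *			// aux->tp_vlan_tci is valid only when
 *			// aux->tp_status & TP_STATUS_VLAN_VALID
 *		}
 */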
2835 
2836 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2837 			       int *uaddr_len, int peer)
2838 {
2839 	struct net_device *dev;
2840 	struct sock *sk	= sock->sk;
2841 
2842 	if (peer)
2843 		return -EOPNOTSUPP;
2844 
2845 	uaddr->sa_family = AF_PACKET;
2846 	rcu_read_lock();
2847 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2848 	if (dev)
2849 		strncpy(uaddr->sa_data, dev->name, 14);
2850 	else
2851 		memset(uaddr->sa_data, 0, 14);
2852 	rcu_read_unlock();
2853 	*uaddr_len = sizeof(*uaddr);
2854 
2855 	return 0;
2856 }
2857 
2858 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2859 			  int *uaddr_len, int peer)
2860 {
2861 	struct net_device *dev;
2862 	struct sock *sk = sock->sk;
2863 	struct packet_sock *po = pkt_sk(sk);
2864 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
2865 
2866 	if (peer)
2867 		return -EOPNOTSUPP;
2868 
2869 	sll->sll_family = AF_PACKET;
2870 	sll->sll_ifindex = po->ifindex;
2871 	sll->sll_protocol = po->num;
2872 	sll->sll_pkttype = 0;
2873 	rcu_read_lock();
2874 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
2875 	if (dev) {
2876 		sll->sll_hatype = dev->type;
2877 		sll->sll_halen = dev->addr_len;
2878 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
2879 	} else {
2880 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
2881 		sll->sll_halen = 0;
2882 	}
2883 	rcu_read_unlock();
2884 	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
2885 
2886 	return 0;
2887 }
2888 
2889 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2890 			 int what)
2891 {
2892 	switch (i->type) {
2893 	case PACKET_MR_MULTICAST:
2894 		if (i->alen != dev->addr_len)
2895 			return -EINVAL;
2896 		if (what > 0)
2897 			return dev_mc_add(dev, i->addr);
2898 		else
2899 			return dev_mc_del(dev, i->addr);
2901 	case PACKET_MR_PROMISC:
2902 		return dev_set_promiscuity(dev, what);
2904 	case PACKET_MR_ALLMULTI:
2905 		return dev_set_allmulti(dev, what);
2907 	case PACKET_MR_UNICAST:
2908 		if (i->alen != dev->addr_len)
2909 			return -EINVAL;
2910 		if (what > 0)
2911 			return dev_uc_add(dev, i->addr);
2912 		else
2913 			return dev_uc_del(dev, i->addr);
2915 	default:
2916 		break;
2917 	}
2918 	return 0;
2919 }
2920 
2921 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2922 {
2923 	for ( ; i; i = i->next) {
2924 		if (i->ifindex == dev->ifindex)
2925 			packet_dev_mc(dev, i, what);
2926 	}
2927 }
2928 
2929 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
2930 {
2931 	struct packet_sock *po = pkt_sk(sk);
2932 	struct packet_mclist *ml, *i;
2933 	struct net_device *dev;
2934 	int err;
2935 
2936 	rtnl_lock();
2937 
2938 	err = -ENODEV;
2939 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
2940 	if (!dev)
2941 		goto done;
2942 
2943 	err = -EINVAL;
2944 	if (mreq->mr_alen > dev->addr_len)
2945 		goto done;
2946 
2947 	err = -ENOBUFS;
2948 	i = kmalloc(sizeof(*i), GFP_KERNEL);
2949 	if (i == NULL)
2950 		goto done;
2951 
2952 	err = 0;
2953 	for (ml = po->mclist; ml; ml = ml->next) {
2954 		if (ml->ifindex == mreq->mr_ifindex &&
2955 		    ml->type == mreq->mr_type &&
2956 		    ml->alen == mreq->mr_alen &&
2957 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2958 			ml->count++;
2959 			/* Free the new element ... */
2960 			kfree(i);
2961 			goto done;
2962 		}
2963 	}
2964 
2965 	i->type = mreq->mr_type;
2966 	i->ifindex = mreq->mr_ifindex;
2967 	i->alen = mreq->mr_alen;
2968 	memcpy(i->addr, mreq->mr_address, i->alen);
2969 	i->count = 1;
2970 	i->next = po->mclist;
2971 	po->mclist = i;
2972 	err = packet_dev_mc(dev, i, 1);
2973 	if (err) {
2974 		po->mclist = i->next;
2975 		kfree(i);
2976 	}
2977 
2978 done:
2979 	rtnl_unlock();
2980 	return err;
2981 }
2982 
2983 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
2984 {
2985 	struct packet_mclist *ml, **mlp;
2986 
2987 	rtnl_lock();
2988 
2989 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2990 		if (ml->ifindex == mreq->mr_ifindex &&
2991 		    ml->type == mreq->mr_type &&
2992 		    ml->alen == mreq->mr_alen &&
2993 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2994 			if (--ml->count == 0) {
2995 				struct net_device *dev;
2996 				*mlp = ml->next;
2997 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2998 				if (dev)
2999 					packet_dev_mc(dev, ml, -1);
3000 				kfree(ml);
3001 			}
3002 			rtnl_unlock();
3003 			return 0;
3004 		}
3005 	}
3006 	rtnl_unlock();
3007 	return -EADDRNOTAVAIL;
3008 }
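
/*
 * Typical use of the membership API from user space (illustrative
 * sketch): put one interface into promiscuous mode for the lifetime of
 * the socket, reference-counted by the mclist above rather than by
 * toggling IFF_PROMISC directly.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */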
3009 
3010 static void packet_flush_mclist(struct sock *sk)
3011 {
3012 	struct packet_sock *po = pkt_sk(sk);
3013 	struct packet_mclist *ml;
3014 
3015 	if (!po->mclist)
3016 		return;
3017 
3018 	rtnl_lock();
3019 	while ((ml = po->mclist) != NULL) {
3020 		struct net_device *dev;
3021 
3022 		po->mclist = ml->next;
3023 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3024 		if (dev != NULL)
3025 			packet_dev_mc(dev, ml, -1);
3026 		kfree(ml);
3027 	}
3028 	rtnl_unlock();
3029 }
3030 
3031 static int
3032 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3033 {
3034 	struct sock *sk = sock->sk;
3035 	struct packet_sock *po = pkt_sk(sk);
3036 	int ret;
3037 
3038 	if (level != SOL_PACKET)
3039 		return -ENOPROTOOPT;
3040 
3041 	switch (optname) {
3042 	case PACKET_ADD_MEMBERSHIP:
3043 	case PACKET_DROP_MEMBERSHIP:
3044 	{
3045 		struct packet_mreq_max mreq;
3046 		int len = optlen;
3047 		memset(&mreq, 0, sizeof(mreq));
3048 		if (len < sizeof(struct packet_mreq))
3049 			return -EINVAL;
3050 		if (len > sizeof(mreq))
3051 			len = sizeof(mreq);
3052 		if (copy_from_user(&mreq, optval, len))
3053 			return -EFAULT;
3054 		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3055 			return -EINVAL;
3056 		if (optname == PACKET_ADD_MEMBERSHIP)
3057 			ret = packet_mc_add(sk, &mreq);
3058 		else
3059 			ret = packet_mc_drop(sk, &mreq);
3060 		return ret;
3061 	}
3062 
3063 	case PACKET_RX_RING:
3064 	case PACKET_TX_RING:
3065 	{
3066 		union tpacket_req_u req_u;
3067 		int len;
3068 
3069 		switch (po->tp_version) {
3070 		case TPACKET_V1:
3071 		case TPACKET_V2:
3072 			len = sizeof(req_u.req);
3073 			break;
3074 		case TPACKET_V3:
3075 		default:
3076 			len = sizeof(req_u.req3);
3077 			break;
3078 		}
3079 		if (optlen < len)
3080 			return -EINVAL;
3081 		if (pkt_sk(sk)->has_vnet_hdr)
3082 			return -EINVAL;
3083 		if (copy_from_user(&req_u.req, optval, len))
3084 			return -EFAULT;
3085 		return packet_set_ring(sk, &req_u, 0,
3086 			optname == PACKET_TX_RING);
3087 	}
3088 	case PACKET_COPY_THRESH:
3089 	{
3090 		int val;
3091 
3092 		if (optlen != sizeof(val))
3093 			return -EINVAL;
3094 		if (copy_from_user(&val, optval, sizeof(val)))
3095 			return -EFAULT;
3096 
3097 		pkt_sk(sk)->copy_thresh = val;
3098 		return 0;
3099 	}
3100 	case PACKET_VERSION:
3101 	{
3102 		int val;
3103 
3104 		if (optlen != sizeof(val))
3105 			return -EINVAL;
3106 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3107 			return -EBUSY;
3108 		if (copy_from_user(&val, optval, sizeof(val)))
3109 			return -EFAULT;
3110 		switch (val) {
3111 		case TPACKET_V1:
3112 		case TPACKET_V2:
3113 		case TPACKET_V3:
3114 			po->tp_version = val;
3115 			return 0;
3116 		default:
3117 			return -EINVAL;
3118 		}
3119 	}
3120 	case PACKET_RESERVE:
3121 	{
3122 		unsigned int val;
3123 
3124 		if (optlen != sizeof(val))
3125 			return -EINVAL;
3126 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3127 			return -EBUSY;
3128 		if (copy_from_user(&val, optval, sizeof(val)))
3129 			return -EFAULT;
3130 		po->tp_reserve = val;
3131 		return 0;
3132 	}
3133 	case PACKET_LOSS:
3134 	{
3135 		unsigned int val;
3136 
3137 		if (optlen != sizeof(val))
3138 			return -EINVAL;
3139 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3140 			return -EBUSY;
3141 		if (copy_from_user(&val, optval, sizeof(val)))
3142 			return -EFAULT;
3143 		po->tp_loss = !!val;
3144 		return 0;
3145 	}
3146 	case PACKET_AUXDATA:
3147 	{
3148 		int val;
3149 
3150 		if (optlen < sizeof(val))
3151 			return -EINVAL;
3152 		if (copy_from_user(&val, optval, sizeof(val)))
3153 			return -EFAULT;
3154 
3155 		po->auxdata = !!val;
3156 		return 0;
3157 	}
3158 	case PACKET_ORIGDEV:
3159 	{
3160 		int val;
3161 
3162 		if (optlen < sizeof(val))
3163 			return -EINVAL;
3164 		if (copy_from_user(&val, optval, sizeof(val)))
3165 			return -EFAULT;
3166 
3167 		po->origdev = !!val;
3168 		return 0;
3169 	}
3170 	case PACKET_VNET_HDR:
3171 	{
3172 		int val;
3173 
3174 		if (sock->type != SOCK_RAW)
3175 			return -EINVAL;
3176 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3177 			return -EBUSY;
3178 		if (optlen < sizeof(val))
3179 			return -EINVAL;
3180 		if (copy_from_user(&val, optval, sizeof(val)))
3181 			return -EFAULT;
3182 
3183 		po->has_vnet_hdr = !!val;
3184 		return 0;
3185 	}
3186 	case PACKET_TIMESTAMP:
3187 	{
3188 		int val;
3189 
3190 		if (optlen != sizeof(val))
3191 			return -EINVAL;
3192 		if (copy_from_user(&val, optval, sizeof(val)))
3193 			return -EFAULT;
3194 
3195 		po->tp_tstamp = val;
3196 		return 0;
3197 	}
3198 	case PACKET_FANOUT:
3199 	{
3200 		int val;
3201 
3202 		if (optlen != sizeof(val))
3203 			return -EINVAL;
3204 		if (copy_from_user(&val, optval, sizeof(val)))
3205 			return -EFAULT;
3206 
3207 		return fanout_add(sk, val & 0xffff, val >> 16);
3208 	}
3209 	case PACKET_TX_HAS_OFF:
3210 	{
3211 		unsigned int val;
3212 
3213 		if (optlen != sizeof(val))
3214 			return -EINVAL;
3215 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3216 			return -EBUSY;
3217 		if (copy_from_user(&val, optval, sizeof(val)))
3218 			return -EFAULT;
3219 		po->tp_tx_has_off = !!val;
3220 		return 0;
3221 	}
3222 	default:
3223 		return -ENOPROTOOPT;
3224 	}
3225 }
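
/*
 * Ordering matters for the ring options above: PACKET_VERSION must be
 * set before PACKET_RX_RING/PACKET_TX_RING, and both return -EBUSY once
 * a ring exists.  Illustrative sketch satisfying the packet_set_ring()
 * sanity checks (two frames per 4 KiB block, 64 blocks):
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,	// block_nr * frames_per_block
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */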
3226 
3227 static int packet_getsockopt(struct socket *sock, int level, int optname,
3228 			     char __user *optval, int __user *optlen)
3229 {
3230 	int len;
3231 	int val, lv = sizeof(val);
3232 	struct sock *sk = sock->sk;
3233 	struct packet_sock *po = pkt_sk(sk);
3234 	void *data = &val;
3235 	struct tpacket_stats st;
3236 	union tpacket_stats_u st_u;
3237 
3238 	if (level != SOL_PACKET)
3239 		return -ENOPROTOOPT;
3240 
3241 	if (get_user(len, optlen))
3242 		return -EFAULT;
3243 
3244 	if (len < 0)
3245 		return -EINVAL;
3246 
3247 	switch (optname) {
3248 	case PACKET_STATISTICS:
3249 		spin_lock_bh(&sk->sk_receive_queue.lock);
3250 		if (po->tp_version == TPACKET_V3) {
3251 			lv = sizeof(struct tpacket_stats_v3);
3252 			memcpy(&st_u.stats3, &po->stats,
3253 			       sizeof(struct tpacket_stats));
3254 			st_u.stats3.tp_freeze_q_cnt =
3255 					po->stats_u.stats3.tp_freeze_q_cnt;
3256 			st_u.stats3.tp_packets += po->stats.tp_drops;
3257 			data = &st_u.stats3;
3258 		} else {
3259 			lv = sizeof(struct tpacket_stats);
3260 			st = po->stats;
3261 			st.tp_packets += st.tp_drops;
3262 			data = &st;
3263 		}
3264 		memset(&po->stats, 0, sizeof(st));
3265 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3266 		break;
3267 	case PACKET_AUXDATA:
3268 		val = po->auxdata;
3269 		break;
3270 	case PACKET_ORIGDEV:
3271 		val = po->origdev;
3272 		break;
3273 	case PACKET_VNET_HDR:
3274 		val = po->has_vnet_hdr;
3275 		break;
3276 	case PACKET_VERSION:
3277 		val = po->tp_version;
3278 		break;
3279 	case PACKET_HDRLEN:
3280 		if (len > sizeof(int))
3281 			len = sizeof(int);
3282 		if (copy_from_user(&val, optval, len))
3283 			return -EFAULT;
3284 		switch (val) {
3285 		case TPACKET_V1:
3286 			val = sizeof(struct tpacket_hdr);
3287 			break;
3288 		case TPACKET_V2:
3289 			val = sizeof(struct tpacket2_hdr);
3290 			break;
3291 		case TPACKET_V3:
3292 			val = sizeof(struct tpacket3_hdr);
3293 			break;
3294 		default:
3295 			return -EINVAL;
3296 		}
3297 		break;
3298 	case PACKET_RESERVE:
3299 		val = po->tp_reserve;
3300 		break;
3301 	case PACKET_LOSS:
3302 		val = po->tp_loss;
3303 		break;
3304 	case PACKET_TIMESTAMP:
3305 		val = po->tp_tstamp;
3306 		break;
3307 	case PACKET_FANOUT:
3308 		val = (po->fanout ?
3309 		       ((u32)po->fanout->id |
3310 			((u32)po->fanout->type << 16) |
3311 			((u32)po->fanout->flags << 24)) :
3312 		       0);
3313 		break;
3314 	case PACKET_TX_HAS_OFF:
3315 		val = po->tp_tx_has_off;
3316 		break;
3317 	default:
3318 		return -ENOPROTOOPT;
3319 	}
3320 
3321 	if (len > lv)
3322 		len = lv;
3323 	if (put_user(len, optlen))
3324 		return -EFAULT;
3325 	if (copy_to_user(optval, data, len))
3326 		return -EFAULT;
3327 	return 0;
3328 }
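
/*
 * PACKET_HDRLEN is the one option above that also reads optval: the
 * caller passes a TPACKET version in and gets that version's header
 * length back, which user space needs to locate frame payloads.
 * Illustrative sketch:
 *
 *	int val = TPACKET_V2;
 *	socklen_t len = sizeof(val);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &val, &len);
 *	// val now holds sizeof(struct tpacket2_hdr)
 */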
3329 
3330 
3331 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3332 {
3333 	struct sock *sk;
3334 	struct net_device *dev = data;
3335 	struct net *net = dev_net(dev);
3336 
3337 	rcu_read_lock();
3338 	sk_for_each_rcu(sk, &net->packet.sklist) {
3339 		struct packet_sock *po = pkt_sk(sk);
3340 
3341 		switch (msg) {
3342 		case NETDEV_UNREGISTER:
3343 			if (po->mclist)
3344 				packet_dev_mclist(dev, po->mclist, -1);
3345 			/* fallthrough */
3346 
3347 		case NETDEV_DOWN:
3348 			if (dev->ifindex == po->ifindex) {
3349 				spin_lock(&po->bind_lock);
3350 				if (po->running) {
3351 					__unregister_prot_hook(sk, false);
3352 					sk->sk_err = ENETDOWN;
3353 					if (!sock_flag(sk, SOCK_DEAD))
3354 						sk->sk_error_report(sk);
3355 				}
3356 				if (msg == NETDEV_UNREGISTER) {
3357 					po->ifindex = -1;
3358 					if (po->prot_hook.dev)
3359 						dev_put(po->prot_hook.dev);
3360 					po->prot_hook.dev = NULL;
3361 				}
3362 				spin_unlock(&po->bind_lock);
3363 			}
3364 			break;
3365 		case NETDEV_UP:
3366 			if (dev->ifindex == po->ifindex) {
3367 				spin_lock(&po->bind_lock);
3368 				if (po->num)
3369 					register_prot_hook(sk);
3370 				spin_unlock(&po->bind_lock);
3371 			}
3372 			break;
3373 		}
3374 	}
3375 	rcu_read_unlock();
3376 	return NOTIFY_DONE;
3377 }
3378 
3379 
3380 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3381 			unsigned long arg)
3382 {
3383 	struct sock *sk = sock->sk;
3384 
3385 	switch (cmd) {
3386 	case SIOCOUTQ:
3387 	{
3388 		int amount = sk_wmem_alloc_get(sk);
3389 
3390 		return put_user(amount, (int __user *)arg);
3391 	}
3392 	case SIOCINQ:
3393 	{
3394 		struct sk_buff *skb;
3395 		int amount = 0;
3396 
3397 		spin_lock_bh(&sk->sk_receive_queue.lock);
3398 		skb = skb_peek(&sk->sk_receive_queue);
3399 		if (skb)
3400 			amount = skb->len;
3401 		spin_unlock_bh(&sk->sk_receive_queue.lock);
3402 		return put_user(amount, (int __user *)arg);
3403 	}
3404 	case SIOCGSTAMP:
3405 		return sock_get_timestamp(sk, (struct timeval __user *)arg);
3406 	case SIOCGSTAMPNS:
3407 		return sock_get_timestampns(sk, (struct timespec __user *)arg);
3408 
3409 #ifdef CONFIG_INET
3410 	case SIOCADDRT:
3411 	case SIOCDELRT:
3412 	case SIOCDARP:
3413 	case SIOCGARP:
3414 	case SIOCSARP:
3415 	case SIOCGIFADDR:
3416 	case SIOCSIFADDR:
3417 	case SIOCGIFBRDADDR:
3418 	case SIOCSIFBRDADDR:
3419 	case SIOCGIFNETMASK:
3420 	case SIOCSIFNETMASK:
3421 	case SIOCGIFDSTADDR:
3422 	case SIOCSIFDSTADDR:
3423 	case SIOCSIFFLAGS:
3424 		return inet_dgram_ops.ioctl(sock, cmd, arg);
3425 #endif
3426 
3427 	default:
3428 		return -ENOIOCTLCMD;
3429 	}
3430 	return 0;
3431 }
3432 
3433 static unsigned int packet_poll(struct file *file, struct socket *sock,
3434 				poll_table *wait)
3435 {
3436 	struct sock *sk = sock->sk;
3437 	struct packet_sock *po = pkt_sk(sk);
3438 	unsigned int mask = datagram_poll(file, sock, wait);
3439 
3440 	spin_lock_bh(&sk->sk_receive_queue.lock);
3441 	if (po->rx_ring.pg_vec) {
3442 		if (!packet_previous_rx_frame(po, &po->rx_ring,
3443 			TP_STATUS_KERNEL))
3444 			mask |= POLLIN | POLLRDNORM;
3445 	}
3446 	spin_unlock_bh(&sk->sk_receive_queue.lock);
3447 	spin_lock_bh(&sk->sk_write_queue.lock);
3448 	if (po->tx_ring.pg_vec) {
3449 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3450 			mask |= POLLOUT | POLLWRNORM;
3451 	}
3452 	spin_unlock_bh(&sk->sk_write_queue.lock);
3453 	return mask;
3454 }
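
/*
 * RX-ring consumer driven by the poll mask above (illustrative sketch,
 * TPACKET_V2; 'ring', 'framesz', 'nframes' and 'i' are placeholders):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr;
 *
 *	poll(&pfd, 1, -1);
 *	hdr = (void *)(ring + i * framesz);
 *	while (hdr->tp_status & TP_STATUS_USER) {
 *		// packet data at (char *)hdr + hdr->tp_mac
 *		hdr->tp_status = TP_STATUS_KERNEL;	// return the frame
 *		i = (i + 1) % nframes;
 *		hdr = (void *)(ring + i * framesz);
 *	}
 */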
3455 
3456 
3457 /* Dirty? Well, I still have not learned a better way to account
3458  * for user mmaps.
3459  */
3460 
3461 static void packet_mm_open(struct vm_area_struct *vma)
3462 {
3463 	struct file *file = vma->vm_file;
3464 	struct socket *sock = file->private_data;
3465 	struct sock *sk = sock->sk;
3466 
3467 	if (sk)
3468 		atomic_inc(&pkt_sk(sk)->mapped);
3469 }
3470 
3471 static void packet_mm_close(struct vm_area_struct *vma)
3472 {
3473 	struct file *file = vma->vm_file;
3474 	struct socket *sock = file->private_data;
3475 	struct sock *sk = sock->sk;
3476 
3477 	if (sk)
3478 		atomic_dec(&pkt_sk(sk)->mapped);
3479 }
3480 
3481 static const struct vm_operations_struct packet_mmap_ops = {
3482 	.open	=	packet_mm_open,
3483 	.close	=	packet_mm_close,
3484 };
3485 
3486 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3487 			unsigned int len)
3488 {
3489 	int i;
3490 
3491 	for (i = 0; i < len; i++) {
3492 		if (likely(pg_vec[i].buffer)) {
3493 			if (is_vmalloc_addr(pg_vec[i].buffer))
3494 				vfree(pg_vec[i].buffer);
3495 			else
3496 				free_pages((unsigned long)pg_vec[i].buffer,
3497 					   order);
3498 			pg_vec[i].buffer = NULL;
3499 		}
3500 	}
3501 	kfree(pg_vec);
3502 }
3503 
3504 static char *alloc_one_pg_vec_page(unsigned long order)
3505 {
3506 	char *buffer = NULL;
3507 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3508 			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3509 
3510 	buffer = (char *) __get_free_pages(gfp_flags, order);
3511 
3512 	if (buffer)
3513 		return buffer;
3514 
3515 	/*
3516 	 * __get_free_pages failed, fall back to vmalloc
3517 	 */
3518 	buffer = vzalloc((1 << order) * PAGE_SIZE);
3519 
3520 	if (buffer)
3521 		return buffer;
3522 
3523 	/*
3524 	 * vmalloc failed, let's dig into swap here
3525 	 */
3526 	gfp_flags &= ~__GFP_NORETRY;
3527 	buffer = (char *)__get_free_pages(gfp_flags, order);
3528 	if (buffer)
3529 		return buffer;
3530 
3531 	/*
3532 	 * complete and utter failure
3533 	 */
3534 	return NULL;
3535 }
3536 
3537 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3538 {
3539 	unsigned int block_nr = req->tp_block_nr;
3540 	struct pgv *pg_vec;
3541 	int i;
3542 
3543 	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3544 	if (unlikely(!pg_vec))
3545 		goto out;
3546 
3547 	for (i = 0; i < block_nr; i++) {
3548 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3549 		if (unlikely(!pg_vec[i].buffer))
3550 			goto out_free_pgvec;
3551 	}
3552 
3553 out:
3554 	return pg_vec;
3555 
3556 out_free_pgvec:
3557 	free_pg_vec(pg_vec, order, block_nr);
3558 	pg_vec = NULL;
3559 	goto out;
3560 }
3561 
3562 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3563 		int closing, int tx_ring)
3564 {
3565 	struct pgv *pg_vec = NULL;
3566 	struct packet_sock *po = pkt_sk(sk);
3567 	int was_running, order = 0;
3568 	struct packet_ring_buffer *rb;
3569 	struct sk_buff_head *rb_queue;
3570 	__be16 num;
3571 	int err = -EINVAL;
3572 	/* Alias to keep code churn minimal */
3573 	struct tpacket_req *req = &req_u->req;
3574 
3575 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3576 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3577 		WARN(1, "Tx-ring is not supported.\n");
3578 		goto out;
3579 	}
3580 
3581 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3582 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3583 
3584 	err = -EBUSY;
3585 	if (!closing) {
3586 		if (atomic_read(&po->mapped))
3587 			goto out;
3588 		if (atomic_read(&rb->pending))
3589 			goto out;
3590 	}
3591 
3592 	if (req->tp_block_nr) {
3593 		/* Sanity tests and some calculations */
3594 		err = -EBUSY;
3595 		if (unlikely(rb->pg_vec))
3596 			goto out;
3597 
3598 		switch (po->tp_version) {
3599 		case TPACKET_V1:
3600 			po->tp_hdrlen = TPACKET_HDRLEN;
3601 			break;
3602 		case TPACKET_V2:
3603 			po->tp_hdrlen = TPACKET2_HDRLEN;
3604 			break;
3605 		case TPACKET_V3:
3606 			po->tp_hdrlen = TPACKET3_HDRLEN;
3607 			break;
3608 		}
3609 
3610 		err = -EINVAL;
3611 		if (unlikely((int)req->tp_block_size <= 0))
3612 			goto out;
3613 		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3614 			goto out;
3615 		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3616 					po->tp_reserve))
3617 			goto out;
3618 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3619 			goto out;
3620 
3621 		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3622 		if (unlikely(rb->frames_per_block <= 0))
3623 			goto out;
3624 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3625 					req->tp_frame_nr))
3626 			goto out;
3627 
3628 		err = -ENOMEM;
3629 		order = get_order(req->tp_block_size);
3630 		pg_vec = alloc_pg_vec(req, order);
3631 		if (unlikely(!pg_vec))
3632 			goto out;
3633 		switch (po->tp_version) {
3634 		case TPACKET_V3:
3635 		/* Transmit path is not supported. We checked
3636 		 * it above, but just being paranoid.
3637 		 */
3638 			if (!tx_ring)
3639 				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3640 			break;
3641 		default:
3642 			break;
3643 		}
3644 	}
3645 	/* Done */
3646 	else {
3647 		err = -EINVAL;
3648 		if (unlikely(req->tp_frame_nr))
3649 			goto out;
3650 	}
3651 
3652 	lock_sock(sk);
3653 
3654 	/* Detach socket from network */
3655 	spin_lock(&po->bind_lock);
3656 	was_running = po->running;
3657 	num = po->num;
3658 	if (was_running) {
3659 		po->num = 0;
3660 		__unregister_prot_hook(sk, false);
3661 	}
3662 	spin_unlock(&po->bind_lock);
3663 
3664 	synchronize_net();
3665 
3666 	err = -EBUSY;
3667 	mutex_lock(&po->pg_vec_lock);
3668 	if (closing || atomic_read(&po->mapped) == 0) {
3669 		err = 0;
3670 		spin_lock_bh(&rb_queue->lock);
3671 		swap(rb->pg_vec, pg_vec);
3672 		rb->frame_max = (req->tp_frame_nr - 1);
3673 		rb->head = 0;
3674 		rb->frame_size = req->tp_frame_size;
3675 		spin_unlock_bh(&rb_queue->lock);
3676 
3677 		swap(rb->pg_vec_order, order);
3678 		swap(rb->pg_vec_len, req->tp_block_nr);
3679 
3680 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3681 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
3682 						tpacket_rcv : packet_rcv;
3683 		skb_queue_purge(rb_queue);
3684 		if (atomic_read(&po->mapped))
3685 			pr_err("packet_mmap: vma is busy: %d\n",
3686 			       atomic_read(&po->mapped));
3687 	}
3688 	mutex_unlock(&po->pg_vec_lock);
3689 
3690 	spin_lock(&po->bind_lock);
3691 	if (was_running) {
3692 		po->num = num;
3693 		register_prot_hook(sk);
3694 	}
3695 	spin_unlock(&po->bind_lock);
3696 	if (closing && (po->tp_version > TPACKET_V2)) {
3697 		/* Because we don't support block-based V3 on tx-ring */
3698 		if (!tx_ring)
3699 			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3700 	}
3701 	release_sock(sk);
3702 
3703 	if (pg_vec)
3704 		free_pg_vec(pg_vec, order, req->tp_block_nr);
3705 out:
3706 	return err;
3707 }
3708 
3709 static int packet_mmap(struct file *file, struct socket *sock,
3710 		struct vm_area_struct *vma)
3711 {
3712 	struct sock *sk = sock->sk;
3713 	struct packet_sock *po = pkt_sk(sk);
3714 	unsigned long size, expected_size;
3715 	struct packet_ring_buffer *rb;
3716 	unsigned long start;
3717 	int err = -EINVAL;
3718 	int i;
3719 
3720 	if (vma->vm_pgoff)
3721 		return -EINVAL;
3722 
3723 	mutex_lock(&po->pg_vec_lock);
3724 
3725 	expected_size = 0;
3726 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3727 		if (rb->pg_vec) {
3728 			expected_size += rb->pg_vec_len
3729 						* rb->pg_vec_pages
3730 						* PAGE_SIZE;
3731 		}
3732 	}
3733 
3734 	if (expected_size == 0)
3735 		goto out;
3736 
3737 	size = vma->vm_end - vma->vm_start;
3738 	if (size != expected_size)
3739 		goto out;
3740 
3741 	start = vma->vm_start;
3742 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3743 		if (rb->pg_vec == NULL)
3744 			continue;
3745 
3746 		for (i = 0; i < rb->pg_vec_len; i++) {
3747 			struct page *page;
3748 			void *kaddr = rb->pg_vec[i].buffer;
3749 			int pg_num;
3750 
3751 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3752 				page = pgv_to_page(kaddr);
3753 				err = vm_insert_page(vma, start, page);
3754 				if (unlikely(err))
3755 					goto out;
3756 				start += PAGE_SIZE;
3757 				kaddr += PAGE_SIZE;
3758 			}
3759 		}
3760 	}
3761 
3762 	atomic_inc(&po->mapped);
3763 	vma->vm_ops = &packet_mmap_ops;
3764 	err = 0;
3765 
3766 out:
3767 	mutex_unlock(&po->pg_vec_lock);
3768 	return err;
3769 }
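
/*
 * Mapping the rings from user space (illustrative sketch): a single
 * mmap() of exactly expected_size covers the RX ring followed by the
 * TX ring, in that order.
 *
 *	size_t rx_sz = req_rx.tp_block_size * req_rx.tp_block_nr;
 *	size_t tx_sz = req_tx.tp_block_size * req_tx.tp_block_nr;
 *	void *ring = mmap(NULL, rx_sz + tx_sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */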
3770 
3771 static const struct proto_ops packet_ops_spkt = {
3772 	.family =	PF_PACKET,
3773 	.owner =	THIS_MODULE,
3774 	.release =	packet_release,
3775 	.bind =		packet_bind_spkt,
3776 	.connect =	sock_no_connect,
3777 	.socketpair =	sock_no_socketpair,
3778 	.accept =	sock_no_accept,
3779 	.getname =	packet_getname_spkt,
3780 	.poll =		datagram_poll,
3781 	.ioctl =	packet_ioctl,
3782 	.listen =	sock_no_listen,
3783 	.shutdown =	sock_no_shutdown,
3784 	.setsockopt =	sock_no_setsockopt,
3785 	.getsockopt =	sock_no_getsockopt,
3786 	.sendmsg =	packet_sendmsg_spkt,
3787 	.recvmsg =	packet_recvmsg,
3788 	.mmap =		sock_no_mmap,
3789 	.sendpage =	sock_no_sendpage,
3790 };
3791 
3792 static const struct proto_ops packet_ops = {
3793 	.family =	PF_PACKET,
3794 	.owner =	THIS_MODULE,
3795 	.release =	packet_release,
3796 	.bind =		packet_bind,
3797 	.connect =	sock_no_connect,
3798 	.socketpair =	sock_no_socketpair,
3799 	.accept =	sock_no_accept,
3800 	.getname =	packet_getname,
3801 	.poll =		packet_poll,
3802 	.ioctl =	packet_ioctl,
3803 	.listen =	sock_no_listen,
3804 	.shutdown =	sock_no_shutdown,
3805 	.setsockopt =	packet_setsockopt,
3806 	.getsockopt =	packet_getsockopt,
3807 	.sendmsg =	packet_sendmsg,
3808 	.recvmsg =	packet_recvmsg,
3809 	.mmap =		packet_mmap,
3810 	.sendpage =	sock_no_sendpage,
3811 };
3812 
3813 static const struct net_proto_family packet_family_ops = {
3814 	.family =	PF_PACKET,
3815 	.create =	packet_create,
3816 	.owner	=	THIS_MODULE,
3817 };
3818 
3819 static struct notifier_block packet_netdev_notifier = {
3820 	.notifier_call =	packet_notifier,
3821 };
3822 
3823 #ifdef CONFIG_PROC_FS
3824 
3825 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
3826 	__acquires(RCU)
3827 {
3828 	struct net *net = seq_file_net(seq);
3829 
3830 	rcu_read_lock();
3831 	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
3832 }
3833 
3834 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3835 {
3836 	struct net *net = seq_file_net(seq);
3837 	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
3838 }
3839 
3840 static void packet_seq_stop(struct seq_file *seq, void *v)
3841 	__releases(RCU)
3842 {
3843 	rcu_read_unlock();
3844 }
3845 
3846 static int packet_seq_show(struct seq_file *seq, void *v)
3847 {
3848 	if (v == SEQ_START_TOKEN)
3849 		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
3850 	else {
3851 		struct sock *s = sk_entry(v);
3852 		const struct packet_sock *po = pkt_sk(s);
3853 
3854 		seq_printf(seq,
3855 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
3856 			   s,
3857 			   atomic_read(&s->sk_refcnt),
3858 			   s->sk_type,
3859 			   ntohs(po->num),
3860 			   po->ifindex,
3861 			   po->running,
3862 			   atomic_read(&s->sk_rmem_alloc),
3863 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
3864 			   sock_i_ino(s));
3865 	}
3866 
3867 	return 0;
3868 }
3869 
3870 static const struct seq_operations packet_seq_ops = {
3871 	.start	= packet_seq_start,
3872 	.next	= packet_seq_next,
3873 	.stop	= packet_seq_stop,
3874 	.show	= packet_seq_show,
3875 };
3876 
3877 static int packet_seq_open(struct inode *inode, struct file *file)
3878 {
3879 	return seq_open_net(inode, file, &packet_seq_ops,
3880 			    sizeof(struct seq_net_private));
3881 }
3882 
3883 static const struct file_operations packet_seq_fops = {
3884 	.owner		= THIS_MODULE,
3885 	.open		= packet_seq_open,
3886 	.read		= seq_read,
3887 	.llseek		= seq_lseek,
3888 	.release	= seq_release_net,
3889 };
3890 
3891 #endif
3892 
3893 static int __net_init packet_net_init(struct net *net)
3894 {
3895 	mutex_init(&net->packet.sklist_lock);
3896 	INIT_HLIST_HEAD(&net->packet.sklist);
3897 
3898 	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
3899 		return -ENOMEM;
3900 
3901 	return 0;
3902 }
3903 
3904 static void __net_exit packet_net_exit(struct net *net)
3905 {
3906 	remove_proc_entry("packet", net->proc_net);
3907 }
3908 
3909 static struct pernet_operations packet_net_ops = {
3910 	.init = packet_net_init,
3911 	.exit = packet_net_exit,
3912 };
3913 
3914 
3915 static void __exit packet_exit(void)
3916 {
3917 	unregister_netdevice_notifier(&packet_netdev_notifier);
3918 	unregister_pernet_subsys(&packet_net_ops);
3919 	sock_unregister(PF_PACKET);
3920 	proto_unregister(&packet_proto);
3921 }
3922 
3923 static int __init packet_init(void)
3924 {
3925 	int rc = proto_register(&packet_proto, 0);
3926 
3927 	if (rc != 0)
3928 		goto out;
3929 
3930 	sock_register(&packet_family_ops);
3931 	register_pernet_subsys(&packet_net_ops);
3932 	register_netdevice_notifier(&packet_netdev_notifier);
3933 out:
3934 	return rc;
3935 }
3936 
3937 module_init(packet_init);
3938 module_exit(packet_exit);
3939 MODULE_LICENSE("GPL");
3940 MODULE_ALIAS_NETPROTO(PF_PACKET);
3941