xref: /linux/net/packet/af_packet.c (revision e978aa7d7d57d04eb5f88a7507c4fb98577def77)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		PACKET - implements raw packet sockets.
7  *
8  * Authors:	Ross Biro
9  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *
12  * Fixes:
13  *		Alan Cox	:	verify_area() now used correctly
14  *		Alan Cox	:	new skbuff lists, look ma no backlogs!
15  *		Alan Cox	:	tidied skbuff lists.
16  *		Alan Cox	:	Now uses generic datagram routines I
17  *					added. Also fixed the peek/read crash
18  *					from all old Linux datagram code.
19  *		Alan Cox	:	Uses the improved datagram code.
20  *		Alan Cox	:	Added NULL's for socket options.
21  *		Alan Cox	:	Re-commented the code.
22  *		Alan Cox	:	Use new kernel side addressing
23  *		Rob Janssen	:	Correct MTU usage.
24  *		Dave Platt	:	Counter leaks caused by incorrect
25  *					interrupt locking and some slightly
26  *					dubious gcc output. Can you read
27  *					compiler: it said _VOLATILE_
28  *	Richard Kooijman	:	Timestamp fixes.
29  *		Alan Cox	:	New buffers. Use sk->mac.raw.
30  *		Alan Cox	:	sendmsg/recvmsg support.
31  *		Alan Cox	:	Protocol setting support
32  *	Alexey Kuznetsov	:	Untied from IPv4 stack.
33  *	Cyrus Durgin		:	Fixed kerneld for kmod.
34  *	Michal Ostrowski        :       Module initialization cleanup.
35  *         Ulises Alonso        :       Frame number limit removal and
36  *                                      packet_set_ring memory leak.
37  *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
38  *					The convention is that longer addresses
39  *					will simply extend the hardware address
40  *					byte arrays at the end of sockaddr_ll
41  *					and packet_mreq.
42  *		Johann Baudy	:	Added TX RING.
43  *
44  *		This program is free software; you can redistribute it and/or
45  *		modify it under the terms of the GNU General Public License
46  *		as published by the Free Software Foundation; either version
47  *		2 of the License, or (at your option) any later version.
48  *
49  */
50 
51 #include <linux/types.h>
52 #include <linux/mm.h>
53 #include <linux/capability.h>
54 #include <linux/fcntl.h>
55 #include <linux/socket.h>
56 #include <linux/in.h>
57 #include <linux/inet.h>
58 #include <linux/netdevice.h>
59 #include <linux/if_packet.h>
60 #include <linux/wireless.h>
61 #include <linux/kernel.h>
62 #include <linux/kmod.h>
63 #include <linux/slab.h>
64 #include <linux/vmalloc.h>
65 #include <net/net_namespace.h>
66 #include <net/ip.h>
67 #include <net/protocol.h>
68 #include <linux/skbuff.h>
69 #include <net/sock.h>
70 #include <linux/errno.h>
71 #include <linux/timer.h>
72 #include <asm/system.h>
73 #include <asm/uaccess.h>
74 #include <asm/ioctls.h>
75 #include <asm/page.h>
76 #include <asm/cacheflush.h>
77 #include <asm/io.h>
78 #include <linux/proc_fs.h>
79 #include <linux/seq_file.h>
80 #include <linux/poll.h>
81 #include <linux/module.h>
82 #include <linux/init.h>
83 #include <linux/mutex.h>
84 #include <linux/if_vlan.h>
85 #include <linux/virtio_net.h>
86 #include <linux/errqueue.h>
87 #include <linux/net_tstamp.h>
88 
89 #ifdef CONFIG_INET
90 #include <net/inet_common.h>
91 #endif
92 
93 /*
94    Assumptions:
95    - if device has no dev->hard_header routine, it adds and removes ll header
96      inside itself. In this case ll header is invisible outside of device,
97      but higher levels still should reserve dev->hard_header_len.
98      Some devices are enough clever to reallocate skb, when header
99      will not fit to reserved space (tunnel), another ones are silly
100      (PPP).
101    - packet socket receives packets with pulled ll header,
102      so that SOCK_RAW should push it back.
103 
104 On receive:
105 -----------
106 
107 Incoming, dev->hard_header!=NULL
108    mac_header -> ll header
109    data       -> data
110 
111 Outgoing, dev->hard_header!=NULL
112    mac_header -> ll header
113    data       -> ll header
114 
115 Incoming, dev->hard_header==NULL
116    mac_header -> UNKNOWN position. It is very likely, that it points to ll
117 		 header.  PPP makes it, that is wrong, because introduce
118 		 assymetry between rx and tx paths.
119    data       -> data
120 
121 Outgoing, dev->hard_header==NULL
122    mac_header -> data. ll header is still not built!
123    data       -> data
124 
125 Resume
126   If dev->hard_header==NULL we are unlikely to restore sensible ll header.
127 
128 
129 On transmit:
130 ------------
131 
132 dev->hard_header != NULL
133    mac_header -> ll header
134    data       -> ll header
135 
136 dev->hard_header == NULL (ll header is added by device, we cannot control it)
137    mac_header -> data
138    data       -> data
139 
140    We should set nh.raw on output to correct posistion,
141    packet classifier depends on it.
142  */
143 
144 /* Private packet socket structures. */
145 
146 struct packet_mclist {
147 	struct packet_mclist	*next;
148 	int			ifindex;
149 	int			count;
150 	unsigned short		type;
151 	unsigned short		alen;
152 	unsigned char		addr[MAX_ADDR_LEN];
153 };
154 /* identical to struct packet_mreq except it has
155  * a longer address field.
156  */
157 struct packet_mreq_max {
158 	int		mr_ifindex;
159 	unsigned short	mr_type;
160 	unsigned short	mr_alen;
161 	unsigned char	mr_address[MAX_ADDR_LEN];
162 };
163 
164 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
165 		int closing, int tx_ring);
166 
167 struct pgv {
168 	char *buffer;
169 };
170 
171 struct packet_ring_buffer {
172 	struct pgv		*pg_vec;
173 	unsigned int		head;
174 	unsigned int		frames_per_block;
175 	unsigned int		frame_size;
176 	unsigned int		frame_max;
177 
178 	unsigned int		pg_vec_order;
179 	unsigned int		pg_vec_pages;
180 	unsigned int		pg_vec_len;
181 
182 	atomic_t		pending;
183 };
184 
185 struct packet_sock;
186 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
187 
188 static void packet_flush_mclist(struct sock *sk);
189 
190 struct packet_fanout;
191 struct packet_sock {
192 	/* struct sock has to be the first member of packet_sock */
193 	struct sock		sk;
194 	struct packet_fanout	*fanout;
195 	struct tpacket_stats	stats;
196 	struct packet_ring_buffer	rx_ring;
197 	struct packet_ring_buffer	tx_ring;
198 	int			copy_thresh;
199 	spinlock_t		bind_lock;
200 	struct mutex		pg_vec_lock;
201 	unsigned int		running:1,	/* prot_hook is attached*/
202 				auxdata:1,
203 				origdev:1,
204 				has_vnet_hdr:1;
205 	int			ifindex;	/* bound device		*/
206 	__be16			num;
207 	struct packet_mclist	*mclist;
208 	atomic_t		mapped;
209 	enum tpacket_versions	tp_version;
210 	unsigned int		tp_hdrlen;
211 	unsigned int		tp_reserve;
212 	unsigned int		tp_loss:1;
213 	unsigned int		tp_tstamp;
214 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
215 };
216 
217 #define PACKET_FANOUT_MAX	256
218 
219 struct packet_fanout {
220 #ifdef CONFIG_NET_NS
221 	struct net		*net;
222 #endif
223 	unsigned int		num_members;
224 	u16			id;
225 	u8			type;
226 	u8			defrag;
227 	atomic_t		rr_cur;
228 	struct list_head	list;
229 	struct sock		*arr[PACKET_FANOUT_MAX];
230 	spinlock_t		lock;
231 	atomic_t		sk_ref;
232 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
233 };
234 
235 struct packet_skb_cb {
236 	unsigned int origlen;
237 	union {
238 		struct sockaddr_pkt pkt;
239 		struct sockaddr_ll ll;
240 	} sa;
241 };
242 
243 #define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
244 
245 static inline struct packet_sock *pkt_sk(struct sock *sk)
246 {
247 	return (struct packet_sock *)sk;
248 }
249 
250 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
251 static void __fanout_link(struct sock *sk, struct packet_sock *po);
252 
253 /* register_prot_hook must be invoked with the po->bind_lock held,
254  * or from a context in which asynchronous accesses to the packet
255  * socket is not possible (packet_create()).
256  */
257 static void register_prot_hook(struct sock *sk)
258 {
259 	struct packet_sock *po = pkt_sk(sk);
260 	if (!po->running) {
261 		if (po->fanout)
262 			__fanout_link(sk, po);
263 		else
264 			dev_add_pack(&po->prot_hook);
265 		sock_hold(sk);
266 		po->running = 1;
267 	}
268 }
269 
270 /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
271  * held.   If the sync parameter is true, we will temporarily drop
272  * the po->bind_lock and do a synchronize_net to make sure no
273  * asynchronous packet processing paths still refer to the elements
274  * of po->prot_hook.  If the sync parameter is false, it is the
275  * callers responsibility to take care of this.
276  */
277 static void __unregister_prot_hook(struct sock *sk, bool sync)
278 {
279 	struct packet_sock *po = pkt_sk(sk);
280 
281 	po->running = 0;
282 	if (po->fanout)
283 		__fanout_unlink(sk, po);
284 	else
285 		__dev_remove_pack(&po->prot_hook);
286 	__sock_put(sk);
287 
288 	if (sync) {
289 		spin_unlock(&po->bind_lock);
290 		synchronize_net();
291 		spin_lock(&po->bind_lock);
292 	}
293 }
294 
295 static void unregister_prot_hook(struct sock *sk, bool sync)
296 {
297 	struct packet_sock *po = pkt_sk(sk);
298 
299 	if (po->running)
300 		__unregister_prot_hook(sk, sync);
301 }
302 
303 static inline __pure struct page *pgv_to_page(void *addr)
304 {
305 	if (is_vmalloc_addr(addr))
306 		return vmalloc_to_page(addr);
307 	return virt_to_page(addr);
308 }
309 
310 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
311 {
312 	union {
313 		struct tpacket_hdr *h1;
314 		struct tpacket2_hdr *h2;
315 		void *raw;
316 	} h;
317 
318 	h.raw = frame;
319 	switch (po->tp_version) {
320 	case TPACKET_V1:
321 		h.h1->tp_status = status;
322 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
323 		break;
324 	case TPACKET_V2:
325 		h.h2->tp_status = status;
326 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
327 		break;
328 	default:
329 		pr_err("TPACKET version not supported\n");
330 		BUG();
331 	}
332 
333 	smp_wmb();
334 }
335 
336 static int __packet_get_status(struct packet_sock *po, void *frame)
337 {
338 	union {
339 		struct tpacket_hdr *h1;
340 		struct tpacket2_hdr *h2;
341 		void *raw;
342 	} h;
343 
344 	smp_rmb();
345 
346 	h.raw = frame;
347 	switch (po->tp_version) {
348 	case TPACKET_V1:
349 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
350 		return h.h1->tp_status;
351 	case TPACKET_V2:
352 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
353 		return h.h2->tp_status;
354 	default:
355 		pr_err("TPACKET version not supported\n");
356 		BUG();
357 		return 0;
358 	}
359 }
360 
361 static void *packet_lookup_frame(struct packet_sock *po,
362 		struct packet_ring_buffer *rb,
363 		unsigned int position,
364 		int status)
365 {
366 	unsigned int pg_vec_pos, frame_offset;
367 	union {
368 		struct tpacket_hdr *h1;
369 		struct tpacket2_hdr *h2;
370 		void *raw;
371 	} h;
372 
373 	pg_vec_pos = position / rb->frames_per_block;
374 	frame_offset = position % rb->frames_per_block;
375 
376 	h.raw = rb->pg_vec[pg_vec_pos].buffer +
377 		(frame_offset * rb->frame_size);
378 
379 	if (status != __packet_get_status(po, h.raw))
380 		return NULL;
381 
382 	return h.raw;
383 }
384 
385 static inline void *packet_current_frame(struct packet_sock *po,
386 		struct packet_ring_buffer *rb,
387 		int status)
388 {
389 	return packet_lookup_frame(po, rb, rb->head, status);
390 }
391 
392 static inline void *packet_previous_frame(struct packet_sock *po,
393 		struct packet_ring_buffer *rb,
394 		int status)
395 {
396 	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
397 	return packet_lookup_frame(po, rb, previous, status);
398 }
399 
400 static inline void packet_increment_head(struct packet_ring_buffer *buff)
401 {
402 	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
403 }
404 
405 static void packet_sock_destruct(struct sock *sk)
406 {
407 	skb_queue_purge(&sk->sk_error_queue);
408 
409 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
410 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
411 
412 	if (!sock_flag(sk, SOCK_DEAD)) {
413 		pr_err("Attempt to release alive packet socket: %p\n", sk);
414 		return;
415 	}
416 
417 	sk_refcnt_debug_dec(sk);
418 }
419 
420 static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
421 {
422 	int x = atomic_read(&f->rr_cur) + 1;
423 
424 	if (x >= num)
425 		x = 0;
426 
427 	return x;
428 }
429 
430 static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
431 {
432 	u32 idx, hash = skb->rxhash;
433 
434 	idx = ((u64)hash * num) >> 32;
435 
436 	return f->arr[idx];
437 }
438 
439 static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
440 {
441 	int cur, old;
442 
443 	cur = atomic_read(&f->rr_cur);
444 	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
445 				     fanout_rr_next(f, num))) != cur)
446 		cur = old;
447 	return f->arr[cur];
448 }
449 
450 static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
451 {
452 	unsigned int cpu = smp_processor_id();
453 
454 	return f->arr[cpu % num];
455 }
456 
457 static struct sk_buff *fanout_check_defrag(struct sk_buff *skb)
458 {
459 #ifdef CONFIG_INET
460 	const struct iphdr *iph;
461 	u32 len;
462 
463 	if (skb->protocol != htons(ETH_P_IP))
464 		return skb;
465 
466 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
467 		return skb;
468 
469 	iph = ip_hdr(skb);
470 	if (iph->ihl < 5 || iph->version != 4)
471 		return skb;
472 	if (!pskb_may_pull(skb, iph->ihl*4))
473 		return skb;
474 	iph = ip_hdr(skb);
475 	len = ntohs(iph->tot_len);
476 	if (skb->len < len || len < (iph->ihl * 4))
477 		return skb;
478 
479 	if (ip_is_fragment(ip_hdr(skb))) {
480 		skb = skb_share_check(skb, GFP_ATOMIC);
481 		if (skb) {
482 			if (pskb_trim_rcsum(skb, len))
483 				return skb;
484 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
485 			if (ip_defrag(skb, IP_DEFRAG_AF_PACKET))
486 				return NULL;
487 			skb->rxhash = 0;
488 		}
489 	}
490 #endif
491 	return skb;
492 }
493 
494 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
495 			     struct packet_type *pt, struct net_device *orig_dev)
496 {
497 	struct packet_fanout *f = pt->af_packet_priv;
498 	unsigned int num = f->num_members;
499 	struct packet_sock *po;
500 	struct sock *sk;
501 
502 	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
503 	    !num) {
504 		kfree_skb(skb);
505 		return 0;
506 	}
507 
508 	switch (f->type) {
509 	case PACKET_FANOUT_HASH:
510 	default:
511 		if (f->defrag) {
512 			skb = fanout_check_defrag(skb);
513 			if (!skb)
514 				return 0;
515 		}
516 		skb_get_rxhash(skb);
517 		sk = fanout_demux_hash(f, skb, num);
518 		break;
519 	case PACKET_FANOUT_LB:
520 		sk = fanout_demux_lb(f, skb, num);
521 		break;
522 	case PACKET_FANOUT_CPU:
523 		sk = fanout_demux_cpu(f, skb, num);
524 		break;
525 	}
526 
527 	po = pkt_sk(sk);
528 
529 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
530 }
531 
532 static DEFINE_MUTEX(fanout_mutex);
533 static LIST_HEAD(fanout_list);
534 
535 static void __fanout_link(struct sock *sk, struct packet_sock *po)
536 {
537 	struct packet_fanout *f = po->fanout;
538 
539 	spin_lock(&f->lock);
540 	f->arr[f->num_members] = sk;
541 	smp_wmb();
542 	f->num_members++;
543 	spin_unlock(&f->lock);
544 }
545 
546 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
547 {
548 	struct packet_fanout *f = po->fanout;
549 	int i;
550 
551 	spin_lock(&f->lock);
552 	for (i = 0; i < f->num_members; i++) {
553 		if (f->arr[i] == sk)
554 			break;
555 	}
556 	BUG_ON(i >= f->num_members);
557 	f->arr[i] = f->arr[f->num_members - 1];
558 	f->num_members--;
559 	spin_unlock(&f->lock);
560 }
561 
562 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
563 {
564 	struct packet_sock *po = pkt_sk(sk);
565 	struct packet_fanout *f, *match;
566 	u8 type = type_flags & 0xff;
567 	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
568 	int err;
569 
570 	switch (type) {
571 	case PACKET_FANOUT_HASH:
572 	case PACKET_FANOUT_LB:
573 	case PACKET_FANOUT_CPU:
574 		break;
575 	default:
576 		return -EINVAL;
577 	}
578 
579 	if (!po->running)
580 		return -EINVAL;
581 
582 	if (po->fanout)
583 		return -EALREADY;
584 
585 	mutex_lock(&fanout_mutex);
586 	match = NULL;
587 	list_for_each_entry(f, &fanout_list, list) {
588 		if (f->id == id &&
589 		    read_pnet(&f->net) == sock_net(sk)) {
590 			match = f;
591 			break;
592 		}
593 	}
594 	err = -EINVAL;
595 	if (match && match->defrag != defrag)
596 		goto out;
597 	if (!match) {
598 		err = -ENOMEM;
599 		match = kzalloc(sizeof(*match), GFP_KERNEL);
600 		if (!match)
601 			goto out;
602 		write_pnet(&match->net, sock_net(sk));
603 		match->id = id;
604 		match->type = type;
605 		match->defrag = defrag;
606 		atomic_set(&match->rr_cur, 0);
607 		INIT_LIST_HEAD(&match->list);
608 		spin_lock_init(&match->lock);
609 		atomic_set(&match->sk_ref, 0);
610 		match->prot_hook.type = po->prot_hook.type;
611 		match->prot_hook.dev = po->prot_hook.dev;
612 		match->prot_hook.func = packet_rcv_fanout;
613 		match->prot_hook.af_packet_priv = match;
614 		dev_add_pack(&match->prot_hook);
615 		list_add(&match->list, &fanout_list);
616 	}
617 	err = -EINVAL;
618 	if (match->type == type &&
619 	    match->prot_hook.type == po->prot_hook.type &&
620 	    match->prot_hook.dev == po->prot_hook.dev) {
621 		err = -ENOSPC;
622 		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
623 			__dev_remove_pack(&po->prot_hook);
624 			po->fanout = match;
625 			atomic_inc(&match->sk_ref);
626 			__fanout_link(sk, po);
627 			err = 0;
628 		}
629 	}
630 out:
631 	mutex_unlock(&fanout_mutex);
632 	return err;
633 }
634 
635 static void fanout_release(struct sock *sk)
636 {
637 	struct packet_sock *po = pkt_sk(sk);
638 	struct packet_fanout *f;
639 
640 	f = po->fanout;
641 	if (!f)
642 		return;
643 
644 	po->fanout = NULL;
645 
646 	mutex_lock(&fanout_mutex);
647 	if (atomic_dec_and_test(&f->sk_ref)) {
648 		list_del(&f->list);
649 		dev_remove_pack(&f->prot_hook);
650 		kfree(f);
651 	}
652 	mutex_unlock(&fanout_mutex);
653 }
654 
655 static const struct proto_ops packet_ops;
656 
657 static const struct proto_ops packet_ops_spkt;
658 
659 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
660 			   struct packet_type *pt, struct net_device *orig_dev)
661 {
662 	struct sock *sk;
663 	struct sockaddr_pkt *spkt;
664 
665 	/*
666 	 *	When we registered the protocol we saved the socket in the data
667 	 *	field for just this event.
668 	 */
669 
670 	sk = pt->af_packet_priv;
671 
672 	/*
673 	 *	Yank back the headers [hope the device set this
674 	 *	right or kerboom...]
675 	 *
676 	 *	Incoming packets have ll header pulled,
677 	 *	push it back.
678 	 *
679 	 *	For outgoing ones skb->data == skb_mac_header(skb)
680 	 *	so that this procedure is noop.
681 	 */
682 
683 	if (skb->pkt_type == PACKET_LOOPBACK)
684 		goto out;
685 
686 	if (!net_eq(dev_net(dev), sock_net(sk)))
687 		goto out;
688 
689 	skb = skb_share_check(skb, GFP_ATOMIC);
690 	if (skb == NULL)
691 		goto oom;
692 
693 	/* drop any routing info */
694 	skb_dst_drop(skb);
695 
696 	/* drop conntrack reference */
697 	nf_reset(skb);
698 
699 	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
700 
701 	skb_push(skb, skb->data - skb_mac_header(skb));
702 
703 	/*
704 	 *	The SOCK_PACKET socket receives _all_ frames.
705 	 */
706 
707 	spkt->spkt_family = dev->type;
708 	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
709 	spkt->spkt_protocol = skb->protocol;
710 
711 	/*
712 	 *	Charge the memory to the socket. This is done specifically
713 	 *	to prevent sockets using all the memory up.
714 	 */
715 
716 	if (sock_queue_rcv_skb(sk, skb) == 0)
717 		return 0;
718 
719 out:
720 	kfree_skb(skb);
721 oom:
722 	return 0;
723 }
724 
725 
726 /*
727  *	Output a raw packet to a device layer. This bypasses all the other
728  *	protocol layers and you must therefore supply it with a complete frame
729  */
730 
731 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
732 			       struct msghdr *msg, size_t len)
733 {
734 	struct sock *sk = sock->sk;
735 	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
736 	struct sk_buff *skb = NULL;
737 	struct net_device *dev;
738 	__be16 proto = 0;
739 	int err;
740 
741 	/*
742 	 *	Get and verify the address.
743 	 */
744 
745 	if (saddr) {
746 		if (msg->msg_namelen < sizeof(struct sockaddr))
747 			return -EINVAL;
748 		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
749 			proto = saddr->spkt_protocol;
750 	} else
751 		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
752 
753 	/*
754 	 *	Find the device first to size check it
755 	 */
756 
757 	saddr->spkt_device[13] = 0;
758 retry:
759 	rcu_read_lock();
760 	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
761 	err = -ENODEV;
762 	if (dev == NULL)
763 		goto out_unlock;
764 
765 	err = -ENETDOWN;
766 	if (!(dev->flags & IFF_UP))
767 		goto out_unlock;
768 
769 	/*
770 	 * You may not queue a frame bigger than the mtu. This is the lowest level
771 	 * raw protocol and you must do your own fragmentation at this level.
772 	 */
773 
774 	err = -EMSGSIZE;
775 	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
776 		goto out_unlock;
777 
778 	if (!skb) {
779 		size_t reserved = LL_RESERVED_SPACE(dev);
780 		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
781 
782 		rcu_read_unlock();
783 		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
784 		if (skb == NULL)
785 			return -ENOBUFS;
786 		/* FIXME: Save some space for broken drivers that write a hard
787 		 * header at transmission time by themselves. PPP is the notable
788 		 * one here. This should really be fixed at the driver level.
789 		 */
790 		skb_reserve(skb, reserved);
791 		skb_reset_network_header(skb);
792 
793 		/* Try to align data part correctly */
794 		if (hhlen) {
795 			skb->data -= hhlen;
796 			skb->tail -= hhlen;
797 			if (len < hhlen)
798 				skb_reset_network_header(skb);
799 		}
800 		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
801 		if (err)
802 			goto out_free;
803 		goto retry;
804 	}
805 
806 	if (len > (dev->mtu + dev->hard_header_len)) {
807 		/* Earlier code assumed this would be a VLAN pkt,
808 		 * double-check this now that we have the actual
809 		 * packet in hand.
810 		 */
811 		struct ethhdr *ehdr;
812 		skb_reset_mac_header(skb);
813 		ehdr = eth_hdr(skb);
814 		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
815 			err = -EMSGSIZE;
816 			goto out_unlock;
817 		}
818 	}
819 
820 	skb->protocol = proto;
821 	skb->dev = dev;
822 	skb->priority = sk->sk_priority;
823 	skb->mark = sk->sk_mark;
824 	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
825 	if (err < 0)
826 		goto out_unlock;
827 
828 	dev_queue_xmit(skb);
829 	rcu_read_unlock();
830 	return len;
831 
832 out_unlock:
833 	rcu_read_unlock();
834 out_free:
835 	kfree_skb(skb);
836 	return err;
837 }
838 
839 static inline unsigned int run_filter(const struct sk_buff *skb,
840 				      const struct sock *sk,
841 				      unsigned int res)
842 {
843 	struct sk_filter *filter;
844 
845 	rcu_read_lock();
846 	filter = rcu_dereference(sk->sk_filter);
847 	if (filter != NULL)
848 		res = SK_RUN_FILTER(filter, skb);
849 	rcu_read_unlock();
850 
851 	return res;
852 }
853 
854 /*
855  * This function makes lazy skb cloning in hope that most of packets
856  * are discarded by BPF.
857  *
858  * Note tricky part: we DO mangle shared skb! skb->data, skb->len
859  * and skb->cb are mangled. It works because (and until) packets
860  * falling here are owned by current CPU. Output packets are cloned
861  * by dev_queue_xmit_nit(), input packets are processed by net_bh
862  * sequencially, so that if we return skb to original state on exit,
863  * we will not harm anyone.
864  */
865 
866 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
867 		      struct packet_type *pt, struct net_device *orig_dev)
868 {
869 	struct sock *sk;
870 	struct sockaddr_ll *sll;
871 	struct packet_sock *po;
872 	u8 *skb_head = skb->data;
873 	int skb_len = skb->len;
874 	unsigned int snaplen, res;
875 
876 	if (skb->pkt_type == PACKET_LOOPBACK)
877 		goto drop;
878 
879 	sk = pt->af_packet_priv;
880 	po = pkt_sk(sk);
881 
882 	if (!net_eq(dev_net(dev), sock_net(sk)))
883 		goto drop;
884 
885 	skb->dev = dev;
886 
887 	if (dev->header_ops) {
888 		/* The device has an explicit notion of ll header,
889 		 * exported to higher levels.
890 		 *
891 		 * Otherwise, the device hides details of its frame
892 		 * structure, so that corresponding packet head is
893 		 * never delivered to user.
894 		 */
895 		if (sk->sk_type != SOCK_DGRAM)
896 			skb_push(skb, skb->data - skb_mac_header(skb));
897 		else if (skb->pkt_type == PACKET_OUTGOING) {
898 			/* Special case: outgoing packets have ll header at head */
899 			skb_pull(skb, skb_network_offset(skb));
900 		}
901 	}
902 
903 	snaplen = skb->len;
904 
905 	res = run_filter(skb, sk, snaplen);
906 	if (!res)
907 		goto drop_n_restore;
908 	if (snaplen > res)
909 		snaplen = res;
910 
911 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
912 	    (unsigned)sk->sk_rcvbuf)
913 		goto drop_n_acct;
914 
915 	if (skb_shared(skb)) {
916 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
917 		if (nskb == NULL)
918 			goto drop_n_acct;
919 
920 		if (skb_head != skb->data) {
921 			skb->data = skb_head;
922 			skb->len = skb_len;
923 		}
924 		kfree_skb(skb);
925 		skb = nskb;
926 	}
927 
928 	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
929 		     sizeof(skb->cb));
930 
931 	sll = &PACKET_SKB_CB(skb)->sa.ll;
932 	sll->sll_family = AF_PACKET;
933 	sll->sll_hatype = dev->type;
934 	sll->sll_protocol = skb->protocol;
935 	sll->sll_pkttype = skb->pkt_type;
936 	if (unlikely(po->origdev))
937 		sll->sll_ifindex = orig_dev->ifindex;
938 	else
939 		sll->sll_ifindex = dev->ifindex;
940 
941 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
942 
943 	PACKET_SKB_CB(skb)->origlen = skb->len;
944 
945 	if (pskb_trim(skb, snaplen))
946 		goto drop_n_acct;
947 
948 	skb_set_owner_r(skb, sk);
949 	skb->dev = NULL;
950 	skb_dst_drop(skb);
951 
952 	/* drop conntrack reference */
953 	nf_reset(skb);
954 
955 	spin_lock(&sk->sk_receive_queue.lock);
956 	po->stats.tp_packets++;
957 	skb->dropcount = atomic_read(&sk->sk_drops);
958 	__skb_queue_tail(&sk->sk_receive_queue, skb);
959 	spin_unlock(&sk->sk_receive_queue.lock);
960 	sk->sk_data_ready(sk, skb->len);
961 	return 0;
962 
963 drop_n_acct:
964 	spin_lock(&sk->sk_receive_queue.lock);
965 	po->stats.tp_drops++;
966 	atomic_inc(&sk->sk_drops);
967 	spin_unlock(&sk->sk_receive_queue.lock);
968 
969 drop_n_restore:
970 	if (skb_head != skb->data && skb_shared(skb)) {
971 		skb->data = skb_head;
972 		skb->len = skb_len;
973 	}
974 drop:
975 	consume_skb(skb);
976 	return 0;
977 }
978 
979 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
980 		       struct packet_type *pt, struct net_device *orig_dev)
981 {
982 	struct sock *sk;
983 	struct packet_sock *po;
984 	struct sockaddr_ll *sll;
985 	union {
986 		struct tpacket_hdr *h1;
987 		struct tpacket2_hdr *h2;
988 		void *raw;
989 	} h;
990 	u8 *skb_head = skb->data;
991 	int skb_len = skb->len;
992 	unsigned int snaplen, res;
993 	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
994 	unsigned short macoff, netoff, hdrlen;
995 	struct sk_buff *copy_skb = NULL;
996 	struct timeval tv;
997 	struct timespec ts;
998 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
999 
1000 	if (skb->pkt_type == PACKET_LOOPBACK)
1001 		goto drop;
1002 
1003 	sk = pt->af_packet_priv;
1004 	po = pkt_sk(sk);
1005 
1006 	if (!net_eq(dev_net(dev), sock_net(sk)))
1007 		goto drop;
1008 
1009 	if (dev->header_ops) {
1010 		if (sk->sk_type != SOCK_DGRAM)
1011 			skb_push(skb, skb->data - skb_mac_header(skb));
1012 		else if (skb->pkt_type == PACKET_OUTGOING) {
1013 			/* Special case: outgoing packets have ll header at head */
1014 			skb_pull(skb, skb_network_offset(skb));
1015 		}
1016 	}
1017 
1018 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1019 		status |= TP_STATUS_CSUMNOTREADY;
1020 
1021 	snaplen = skb->len;
1022 
1023 	res = run_filter(skb, sk, snaplen);
1024 	if (!res)
1025 		goto drop_n_restore;
1026 	if (snaplen > res)
1027 		snaplen = res;
1028 
1029 	if (sk->sk_type == SOCK_DGRAM) {
1030 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1031 				  po->tp_reserve;
1032 	} else {
1033 		unsigned maclen = skb_network_offset(skb);
1034 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
1035 				       (maclen < 16 ? 16 : maclen)) +
1036 			po->tp_reserve;
1037 		macoff = netoff - maclen;
1038 	}
1039 
1040 	if (macoff + snaplen > po->rx_ring.frame_size) {
1041 		if (po->copy_thresh &&
1042 		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
1043 		    (unsigned)sk->sk_rcvbuf) {
1044 			if (skb_shared(skb)) {
1045 				copy_skb = skb_clone(skb, GFP_ATOMIC);
1046 			} else {
1047 				copy_skb = skb_get(skb);
1048 				skb_head = skb->data;
1049 			}
1050 			if (copy_skb)
1051 				skb_set_owner_r(copy_skb, sk);
1052 		}
1053 		snaplen = po->rx_ring.frame_size - macoff;
1054 		if ((int)snaplen < 0)
1055 			snaplen = 0;
1056 	}
1057 
1058 	spin_lock(&sk->sk_receive_queue.lock);
1059 	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
1060 	if (!h.raw)
1061 		goto ring_is_full;
1062 	packet_increment_head(&po->rx_ring);
1063 	po->stats.tp_packets++;
1064 	if (copy_skb) {
1065 		status |= TP_STATUS_COPY;
1066 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1067 	}
1068 	if (!po->stats.tp_drops)
1069 		status &= ~TP_STATUS_LOSING;
1070 	spin_unlock(&sk->sk_receive_queue.lock);
1071 
1072 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1073 
1074 	switch (po->tp_version) {
1075 	case TPACKET_V1:
1076 		h.h1->tp_len = skb->len;
1077 		h.h1->tp_snaplen = snaplen;
1078 		h.h1->tp_mac = macoff;
1079 		h.h1->tp_net = netoff;
1080 		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1081 				&& shhwtstamps->syststamp.tv64)
1082 			tv = ktime_to_timeval(shhwtstamps->syststamp);
1083 		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1084 				&& shhwtstamps->hwtstamp.tv64)
1085 			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1086 		else if (skb->tstamp.tv64)
1087 			tv = ktime_to_timeval(skb->tstamp);
1088 		else
1089 			do_gettimeofday(&tv);
1090 		h.h1->tp_sec = tv.tv_sec;
1091 		h.h1->tp_usec = tv.tv_usec;
1092 		hdrlen = sizeof(*h.h1);
1093 		break;
1094 	case TPACKET_V2:
1095 		h.h2->tp_len = skb->len;
1096 		h.h2->tp_snaplen = snaplen;
1097 		h.h2->tp_mac = macoff;
1098 		h.h2->tp_net = netoff;
1099 		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1100 				&& shhwtstamps->syststamp.tv64)
1101 			ts = ktime_to_timespec(shhwtstamps->syststamp);
1102 		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1103 				&& shhwtstamps->hwtstamp.tv64)
1104 			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1105 		else if (skb->tstamp.tv64)
1106 			ts = ktime_to_timespec(skb->tstamp);
1107 		else
1108 			getnstimeofday(&ts);
1109 		h.h2->tp_sec = ts.tv_sec;
1110 		h.h2->tp_nsec = ts.tv_nsec;
1111 		if (vlan_tx_tag_present(skb)) {
1112 			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1113 			status |= TP_STATUS_VLAN_VALID;
1114 		} else {
1115 			h.h2->tp_vlan_tci = 0;
1116 		}
1117 		h.h2->tp_padding = 0;
1118 		hdrlen = sizeof(*h.h2);
1119 		break;
1120 	default:
1121 		BUG();
1122 	}
1123 
1124 	sll = h.raw + TPACKET_ALIGN(hdrlen);
1125 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1126 	sll->sll_family = AF_PACKET;
1127 	sll->sll_hatype = dev->type;
1128 	sll->sll_protocol = skb->protocol;
1129 	sll->sll_pkttype = skb->pkt_type;
1130 	if (unlikely(po->origdev))
1131 		sll->sll_ifindex = orig_dev->ifindex;
1132 	else
1133 		sll->sll_ifindex = dev->ifindex;
1134 
1135 	smp_mb();
1136 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
1137 	{
1138 		u8 *start, *end;
1139 
1140 		end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
1141 		for (start = h.raw; start < end; start += PAGE_SIZE)
1142 			flush_dcache_page(pgv_to_page(start));
1143 		smp_wmb();
1144 	}
1145 #endif
1146 	__packet_set_status(po, h.raw, status);
1147 
1148 	sk->sk_data_ready(sk, 0);
1149 
1150 drop_n_restore:
1151 	if (skb_head != skb->data && skb_shared(skb)) {
1152 		skb->data = skb_head;
1153 		skb->len = skb_len;
1154 	}
1155 drop:
1156 	kfree_skb(skb);
1157 	return 0;
1158 
1159 ring_is_full:
1160 	po->stats.tp_drops++;
1161 	spin_unlock(&sk->sk_receive_queue.lock);
1162 
1163 	sk->sk_data_ready(sk, 0);
1164 	kfree_skb(copy_skb);
1165 	goto drop_n_restore;
1166 }
1167 
1168 static void tpacket_destruct_skb(struct sk_buff *skb)
1169 {
1170 	struct packet_sock *po = pkt_sk(skb->sk);
1171 	void *ph;
1172 
1173 	BUG_ON(skb == NULL);
1174 
1175 	if (likely(po->tx_ring.pg_vec)) {
1176 		ph = skb_shinfo(skb)->destructor_arg;
1177 		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
1178 		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1179 		atomic_dec(&po->tx_ring.pending);
1180 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1181 	}
1182 
1183 	sock_wfree(skb);
1184 }
1185 
1186 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1187 		void *frame, struct net_device *dev, int size_max,
1188 		__be16 proto, unsigned char *addr)
1189 {
1190 	union {
1191 		struct tpacket_hdr *h1;
1192 		struct tpacket2_hdr *h2;
1193 		void *raw;
1194 	} ph;
1195 	int to_write, offset, len, tp_len, nr_frags, len_max;
1196 	struct socket *sock = po->sk.sk_socket;
1197 	struct page *page;
1198 	void *data;
1199 	int err;
1200 
1201 	ph.raw = frame;
1202 
1203 	skb->protocol = proto;
1204 	skb->dev = dev;
1205 	skb->priority = po->sk.sk_priority;
1206 	skb->mark = po->sk.sk_mark;
1207 	skb_shinfo(skb)->destructor_arg = ph.raw;
1208 
1209 	switch (po->tp_version) {
1210 	case TPACKET_V2:
1211 		tp_len = ph.h2->tp_len;
1212 		break;
1213 	default:
1214 		tp_len = ph.h1->tp_len;
1215 		break;
1216 	}
1217 	if (unlikely(tp_len > size_max)) {
1218 		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
1219 		return -EMSGSIZE;
1220 	}
1221 
1222 	skb_reserve(skb, LL_RESERVED_SPACE(dev));
1223 	skb_reset_network_header(skb);
1224 
1225 	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1226 	to_write = tp_len;
1227 
1228 	if (sock->type == SOCK_DGRAM) {
1229 		err = dev_hard_header(skb, dev, ntohs(proto), addr,
1230 				NULL, tp_len);
1231 		if (unlikely(err < 0))
1232 			return -EINVAL;
1233 	} else if (dev->hard_header_len) {
1234 		/* net device doesn't like empty head */
1235 		if (unlikely(tp_len <= dev->hard_header_len)) {
1236 			pr_err("packet size is too short (%d < %d)\n",
1237 			       tp_len, dev->hard_header_len);
1238 			return -EINVAL;
1239 		}
1240 
1241 		skb_push(skb, dev->hard_header_len);
1242 		err = skb_store_bits(skb, 0, data,
1243 				dev->hard_header_len);
1244 		if (unlikely(err))
1245 			return err;
1246 
1247 		data += dev->hard_header_len;
1248 		to_write -= dev->hard_header_len;
1249 	}
1250 
1251 	err = -EFAULT;
1252 	offset = offset_in_page(data);
1253 	len_max = PAGE_SIZE - offset;
1254 	len = ((to_write > len_max) ? len_max : to_write);
1255 
1256 	skb->data_len = to_write;
1257 	skb->len += to_write;
1258 	skb->truesize += to_write;
1259 	atomic_add(to_write, &po->sk.sk_wmem_alloc);
1260 
1261 	while (likely(to_write)) {
1262 		nr_frags = skb_shinfo(skb)->nr_frags;
1263 
1264 		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
1265 			pr_err("Packet exceed the number of skb frags(%lu)\n",
1266 			       MAX_SKB_FRAGS);
1267 			return -EFAULT;
1268 		}
1269 
1270 		page = pgv_to_page(data);
1271 		data += len;
1272 		flush_dcache_page(page);
1273 		get_page(page);
1274 		skb_fill_page_desc(skb, nr_frags, page, offset, len);
1275 		to_write -= len;
1276 		offset = 0;
1277 		len_max = PAGE_SIZE;
1278 		len = ((to_write > len_max) ? len_max : to_write);
1279 	}
1280 
1281 	return tp_len;
1282 }
1283 
1284 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1285 {
1286 	struct sk_buff *skb;
1287 	struct net_device *dev;
1288 	__be16 proto;
1289 	bool need_rls_dev = false;
1290 	int err, reserve = 0;
1291 	void *ph;
1292 	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
1293 	int tp_len, size_max;
1294 	unsigned char *addr;
1295 	int len_sum = 0;
1296 	int status = 0;
1297 
1298 	mutex_lock(&po->pg_vec_lock);
1299 
1300 	err = -EBUSY;
1301 	if (saddr == NULL) {
1302 		dev = po->prot_hook.dev;
1303 		proto	= po->num;
1304 		addr	= NULL;
1305 	} else {
1306 		err = -EINVAL;
1307 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1308 			goto out;
1309 		if (msg->msg_namelen < (saddr->sll_halen
1310 					+ offsetof(struct sockaddr_ll,
1311 						sll_addr)))
1312 			goto out;
1313 		proto	= saddr->sll_protocol;
1314 		addr	= saddr->sll_addr;
1315 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
1316 		need_rls_dev = true;
1317 	}
1318 
1319 	err = -ENXIO;
1320 	if (unlikely(dev == NULL))
1321 		goto out;
1322 
1323 	reserve = dev->hard_header_len;
1324 
1325 	err = -ENETDOWN;
1326 	if (unlikely(!(dev->flags & IFF_UP)))
1327 		goto out_put;
1328 
1329 	size_max = po->tx_ring.frame_size
1330 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
1331 
1332 	if (size_max > dev->mtu + reserve)
1333 		size_max = dev->mtu + reserve;
1334 
1335 	do {
1336 		ph = packet_current_frame(po, &po->tx_ring,
1337 				TP_STATUS_SEND_REQUEST);
1338 
1339 		if (unlikely(ph == NULL)) {
1340 			schedule();
1341 			continue;
1342 		}
1343 
1344 		status = TP_STATUS_SEND_REQUEST;
1345 		skb = sock_alloc_send_skb(&po->sk,
1346 				LL_ALLOCATED_SPACE(dev)
1347 				+ sizeof(struct sockaddr_ll),
1348 				0, &err);
1349 
1350 		if (unlikely(skb == NULL))
1351 			goto out_status;
1352 
1353 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
1354 				addr);
1355 
1356 		if (unlikely(tp_len < 0)) {
1357 			if (po->tp_loss) {
1358 				__packet_set_status(po, ph,
1359 						TP_STATUS_AVAILABLE);
1360 				packet_increment_head(&po->tx_ring);
1361 				kfree_skb(skb);
1362 				continue;
1363 			} else {
1364 				status = TP_STATUS_WRONG_FORMAT;
1365 				err = tp_len;
1366 				goto out_status;
1367 			}
1368 		}
1369 
1370 		skb->destructor = tpacket_destruct_skb;
1371 		__packet_set_status(po, ph, TP_STATUS_SENDING);
1372 		atomic_inc(&po->tx_ring.pending);
1373 
1374 		status = TP_STATUS_SEND_REQUEST;
1375 		err = dev_queue_xmit(skb);
1376 		if (unlikely(err > 0)) {
1377 			err = net_xmit_errno(err);
1378 			if (err && __packet_get_status(po, ph) ==
1379 				   TP_STATUS_AVAILABLE) {
1380 				/* skb was destructed already */
1381 				skb = NULL;
1382 				goto out_status;
1383 			}
1384 			/*
1385 			 * skb was dropped but not destructed yet;
1386 			 * let's treat it like congestion or err < 0
1387 			 */
1388 			err = 0;
1389 		}
1390 		packet_increment_head(&po->tx_ring);
1391 		len_sum += tp_len;
1392 	} while (likely((ph != NULL) ||
1393 			((!(msg->msg_flags & MSG_DONTWAIT)) &&
1394 			 (atomic_read(&po->tx_ring.pending))))
1395 		);
1396 
1397 	err = len_sum;
1398 	goto out_put;
1399 
1400 out_status:
1401 	__packet_set_status(po, ph, status);
1402 	kfree_skb(skb);
1403 out_put:
1404 	if (need_rls_dev)
1405 		dev_put(dev);
1406 out:
1407 	mutex_unlock(&po->pg_vec_lock);
1408 	return err;
1409 }
1410 
1411 static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
1412 					       size_t reserve, size_t len,
1413 					       size_t linear, int noblock,
1414 					       int *err)
1415 {
1416 	struct sk_buff *skb;
1417 
1418 	/* Under a page?  Don't bother with paged skb. */
1419 	if (prepad + len < PAGE_SIZE || !linear)
1420 		linear = len;
1421 
1422 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1423 				   err);
1424 	if (!skb)
1425 		return NULL;
1426 
1427 	skb_reserve(skb, reserve);
1428 	skb_put(skb, linear);
1429 	skb->data_len = len - linear;
1430 	skb->len += len - linear;
1431 
1432 	return skb;
1433 }
1434 
1435 static int packet_snd(struct socket *sock,
1436 			  struct msghdr *msg, size_t len)
1437 {
1438 	struct sock *sk = sock->sk;
1439 	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
1440 	struct sk_buff *skb;
1441 	struct net_device *dev;
1442 	__be16 proto;
1443 	bool need_rls_dev = false;
1444 	unsigned char *addr;
1445 	int err, reserve = 0;
1446 	struct virtio_net_hdr vnet_hdr = { 0 };
1447 	int offset = 0;
1448 	int vnet_hdr_len;
1449 	struct packet_sock *po = pkt_sk(sk);
1450 	unsigned short gso_type = 0;
1451 
1452 	/*
1453 	 *	Get and verify the address.
1454 	 */
1455 
1456 	if (saddr == NULL) {
1457 		dev = po->prot_hook.dev;
1458 		proto	= po->num;
1459 		addr	= NULL;
1460 	} else {
1461 		err = -EINVAL;
1462 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1463 			goto out;
1464 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
1465 			goto out;
1466 		proto	= saddr->sll_protocol;
1467 		addr	= saddr->sll_addr;
1468 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
1469 		need_rls_dev = true;
1470 	}
1471 
1472 	err = -ENXIO;
1473 	if (dev == NULL)
1474 		goto out_unlock;
1475 	if (sock->type == SOCK_RAW)
1476 		reserve = dev->hard_header_len;
1477 
1478 	err = -ENETDOWN;
1479 	if (!(dev->flags & IFF_UP))
1480 		goto out_unlock;
1481 
1482 	if (po->has_vnet_hdr) {
1483 		vnet_hdr_len = sizeof(vnet_hdr);
1484 
1485 		err = -EINVAL;
1486 		if (len < vnet_hdr_len)
1487 			goto out_unlock;
1488 
1489 		len -= vnet_hdr_len;
1490 
1491 		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
1492 				       vnet_hdr_len);
1493 		if (err < 0)
1494 			goto out_unlock;
1495 
1496 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1497 		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
1498 		      vnet_hdr.hdr_len))
1499 			vnet_hdr.hdr_len = vnet_hdr.csum_start +
1500 						 vnet_hdr.csum_offset + 2;
1501 
1502 		err = -EINVAL;
1503 		if (vnet_hdr.hdr_len > len)
1504 			goto out_unlock;
1505 
1506 		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1507 			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1508 			case VIRTIO_NET_HDR_GSO_TCPV4:
1509 				gso_type = SKB_GSO_TCPV4;
1510 				break;
1511 			case VIRTIO_NET_HDR_GSO_TCPV6:
1512 				gso_type = SKB_GSO_TCPV6;
1513 				break;
1514 			case VIRTIO_NET_HDR_GSO_UDP:
1515 				gso_type = SKB_GSO_UDP;
1516 				break;
1517 			default:
1518 				goto out_unlock;
1519 			}
1520 
1521 			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1522 				gso_type |= SKB_GSO_TCP_ECN;
1523 
1524 			if (vnet_hdr.gso_size == 0)
1525 				goto out_unlock;
1526 
1527 		}
1528 	}
1529 
1530 	err = -EMSGSIZE;
1531 	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
1532 		goto out_unlock;
1533 
1534 	err = -ENOBUFS;
1535 	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
1536 			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
1537 			       msg->msg_flags & MSG_DONTWAIT, &err);
1538 	if (skb == NULL)
1539 		goto out_unlock;
1540 
1541 	skb_set_network_header(skb, reserve);
1542 
1543 	err = -EINVAL;
1544 	if (sock->type == SOCK_DGRAM &&
1545 	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
1546 		goto out_free;
1547 
1548 	/* Returns -EFAULT on error */
1549 	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
1550 	if (err)
1551 		goto out_free;
1552 	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1553 	if (err < 0)
1554 		goto out_free;
1555 
1556 	if (!gso_type && (len > dev->mtu + reserve)) {
1557 		/* Earlier code assumed this would be a VLAN pkt,
1558 		 * double-check this now that we have the actual
1559 		 * packet in hand.
1560 		 */
1561 		struct ethhdr *ehdr;
1562 		skb_reset_mac_header(skb);
1563 		ehdr = eth_hdr(skb);
1564 		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1565 			err = -EMSGSIZE;
1566 			goto out_free;
1567 		}
1568 	}
1569 
1570 	skb->protocol = proto;
1571 	skb->dev = dev;
1572 	skb->priority = sk->sk_priority;
1573 	skb->mark = sk->sk_mark;
1574 
1575 	if (po->has_vnet_hdr) {
1576 		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1577 			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
1578 						  vnet_hdr.csum_offset)) {
1579 				err = -EINVAL;
1580 				goto out_free;
1581 			}
1582 		}
1583 
1584 		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
1585 		skb_shinfo(skb)->gso_type = gso_type;
1586 
1587 		/* Header must be checked, and gso_segs computed. */
1588 		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1589 		skb_shinfo(skb)->gso_segs = 0;
1590 
1591 		len += vnet_hdr_len;
1592 	}
1593 
1594 	/*
1595 	 *	Now send it
1596 	 */
1597 
1598 	err = dev_queue_xmit(skb);
1599 	if (err > 0 && (err = net_xmit_errno(err)) != 0)
1600 		goto out_unlock;
1601 
1602 	if (need_rls_dev)
1603 		dev_put(dev);
1604 
1605 	return len;
1606 
1607 out_free:
1608 	kfree_skb(skb);
1609 out_unlock:
1610 	if (dev && need_rls_dev)
1611 		dev_put(dev);
1612 out:
1613 	return err;
1614 }
1615 
1616 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
1617 		struct msghdr *msg, size_t len)
1618 {
1619 	struct sock *sk = sock->sk;
1620 	struct packet_sock *po = pkt_sk(sk);
1621 	if (po->tx_ring.pg_vec)
1622 		return tpacket_snd(po, msg);
1623 	else
1624 		return packet_snd(sock, msg, len);
1625 }
1626 
1627 /*
1628  *	Close a PACKET socket. This is fairly simple. We immediately go
1629  *	to 'closed' state and remove our protocol entry in the device list.
1630  */
1631 
1632 static int packet_release(struct socket *sock)
1633 {
1634 	struct sock *sk = sock->sk;
1635 	struct packet_sock *po;
1636 	struct net *net;
1637 	struct tpacket_req req;
1638 
1639 	if (!sk)
1640 		return 0;
1641 
1642 	net = sock_net(sk);
1643 	po = pkt_sk(sk);
1644 
1645 	spin_lock_bh(&net->packet.sklist_lock);
1646 	sk_del_node_init_rcu(sk);
1647 	sock_prot_inuse_add(net, sk->sk_prot, -1);
1648 	spin_unlock_bh(&net->packet.sklist_lock);
1649 
1650 	spin_lock(&po->bind_lock);
1651 	unregister_prot_hook(sk, false);
1652 	if (po->prot_hook.dev) {
1653 		dev_put(po->prot_hook.dev);
1654 		po->prot_hook.dev = NULL;
1655 	}
1656 	spin_unlock(&po->bind_lock);
1657 
1658 	packet_flush_mclist(sk);
1659 
1660 	memset(&req, 0, sizeof(req));
1661 
1662 	if (po->rx_ring.pg_vec)
1663 		packet_set_ring(sk, &req, 1, 0);
1664 
1665 	if (po->tx_ring.pg_vec)
1666 		packet_set_ring(sk, &req, 1, 1);
1667 
1668 	fanout_release(sk);
1669 
1670 	synchronize_net();
1671 	/*
1672 	 *	Now the socket is dead. No more input will appear.
1673 	 */
1674 	sock_orphan(sk);
1675 	sock->sk = NULL;
1676 
1677 	/* Purge queues */
1678 
1679 	skb_queue_purge(&sk->sk_receive_queue);
1680 	sk_refcnt_debug_release(sk);
1681 
1682 	sock_put(sk);
1683 	return 0;
1684 }
1685 
1686 /*
1687  *	Attach a packet hook.
1688  */
1689 
1690 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
1691 {
1692 	struct packet_sock *po = pkt_sk(sk);
1693 
1694 	if (po->fanout)
1695 		return -EINVAL;
1696 
1697 	lock_sock(sk);
1698 
1699 	spin_lock(&po->bind_lock);
1700 	unregister_prot_hook(sk, true);
1701 	po->num = protocol;
1702 	po->prot_hook.type = protocol;
1703 	if (po->prot_hook.dev)
1704 		dev_put(po->prot_hook.dev);
1705 	po->prot_hook.dev = dev;
1706 
1707 	po->ifindex = dev ? dev->ifindex : 0;
1708 
1709 	if (protocol == 0)
1710 		goto out_unlock;
1711 
1712 	if (!dev || (dev->flags & IFF_UP)) {
1713 		register_prot_hook(sk);
1714 	} else {
1715 		sk->sk_err = ENETDOWN;
1716 		if (!sock_flag(sk, SOCK_DEAD))
1717 			sk->sk_error_report(sk);
1718 	}
1719 
1720 out_unlock:
1721 	spin_unlock(&po->bind_lock);
1722 	release_sock(sk);
1723 	return 0;
1724 }
1725 
1726 /*
1727  *	Bind a packet socket to a device
1728  */
1729 
1730 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
1731 			    int addr_len)
1732 {
1733 	struct sock *sk = sock->sk;
1734 	char name[15];
1735 	struct net_device *dev;
1736 	int err = -ENODEV;
1737 
1738 	/*
1739 	 *	Check legality
1740 	 */
1741 
1742 	if (addr_len != sizeof(struct sockaddr))
1743 		return -EINVAL;
1744 	strlcpy(name, uaddr->sa_data, sizeof(name));
1745 
1746 	dev = dev_get_by_name(sock_net(sk), name);
1747 	if (dev)
1748 		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
1749 	return err;
1750 }
1751 
1752 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1753 {
1754 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
1755 	struct sock *sk = sock->sk;
1756 	struct net_device *dev = NULL;
1757 	int err;
1758 
1759 
1760 	/*
1761 	 *	Check legality
1762 	 */
1763 
1764 	if (addr_len < sizeof(struct sockaddr_ll))
1765 		return -EINVAL;
1766 	if (sll->sll_family != AF_PACKET)
1767 		return -EINVAL;
1768 
1769 	if (sll->sll_ifindex) {
1770 		err = -ENODEV;
1771 		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
1772 		if (dev == NULL)
1773 			goto out;
1774 	}
1775 	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
1776 
1777 out:
1778 	return err;
1779 }
1780 
1781 static struct proto packet_proto = {
1782 	.name	  = "PACKET",
1783 	.owner	  = THIS_MODULE,
1784 	.obj_size = sizeof(struct packet_sock),
1785 };
1786 
1787 /*
1788  *	Create a packet of type SOCK_PACKET.
1789  */
1790 
1791 static int packet_create(struct net *net, struct socket *sock, int protocol,
1792 			 int kern)
1793 {
1794 	struct sock *sk;
1795 	struct packet_sock *po;
1796 	__be16 proto = (__force __be16)protocol; /* weird, but documented */
1797 	int err;
1798 
1799 	if (!capable(CAP_NET_RAW))
1800 		return -EPERM;
1801 	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
1802 	    sock->type != SOCK_PACKET)
1803 		return -ESOCKTNOSUPPORT;
1804 
1805 	sock->state = SS_UNCONNECTED;
1806 
1807 	err = -ENOBUFS;
1808 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
1809 	if (sk == NULL)
1810 		goto out;
1811 
1812 	sock->ops = &packet_ops;
1813 	if (sock->type == SOCK_PACKET)
1814 		sock->ops = &packet_ops_spkt;
1815 
1816 	sock_init_data(sock, sk);
1817 
1818 	po = pkt_sk(sk);
1819 	sk->sk_family = PF_PACKET;
1820 	po->num = proto;
1821 
1822 	sk->sk_destruct = packet_sock_destruct;
1823 	sk_refcnt_debug_inc(sk);
1824 
1825 	/*
1826 	 *	Attach a protocol block
1827 	 */
1828 
1829 	spin_lock_init(&po->bind_lock);
1830 	mutex_init(&po->pg_vec_lock);
1831 	po->prot_hook.func = packet_rcv;
1832 
1833 	if (sock->type == SOCK_PACKET)
1834 		po->prot_hook.func = packet_rcv_spkt;
1835 
1836 	po->prot_hook.af_packet_priv = sk;
1837 
1838 	if (proto) {
1839 		po->prot_hook.type = proto;
1840 		register_prot_hook(sk);
1841 	}
1842 
1843 	spin_lock_bh(&net->packet.sklist_lock);
1844 	sk_add_node_rcu(sk, &net->packet.sklist);
1845 	sock_prot_inuse_add(net, &packet_proto, 1);
1846 	spin_unlock_bh(&net->packet.sklist_lock);
1847 
1848 	return 0;
1849 out:
1850 	return err;
1851 }
1852 
1853 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
1854 {
1855 	struct sock_exterr_skb *serr;
1856 	struct sk_buff *skb, *skb2;
1857 	int copied, err;
1858 
1859 	err = -EAGAIN;
1860 	skb = skb_dequeue(&sk->sk_error_queue);
1861 	if (skb == NULL)
1862 		goto out;
1863 
1864 	copied = skb->len;
1865 	if (copied > len) {
1866 		msg->msg_flags |= MSG_TRUNC;
1867 		copied = len;
1868 	}
1869 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1870 	if (err)
1871 		goto out_free_skb;
1872 
1873 	sock_recv_timestamp(msg, sk, skb);
1874 
1875 	serr = SKB_EXT_ERR(skb);
1876 	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
1877 		 sizeof(serr->ee), &serr->ee);
1878 
1879 	msg->msg_flags |= MSG_ERRQUEUE;
1880 	err = copied;
1881 
1882 	/* Reset and regenerate socket error */
1883 	spin_lock_bh(&sk->sk_error_queue.lock);
1884 	sk->sk_err = 0;
1885 	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
1886 		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
1887 		spin_unlock_bh(&sk->sk_error_queue.lock);
1888 		sk->sk_error_report(sk);
1889 	} else
1890 		spin_unlock_bh(&sk->sk_error_queue.lock);
1891 
1892 out_free_skb:
1893 	kfree_skb(skb);
1894 out:
1895 	return err;
1896 }
1897 
1898 /*
1899  *	Pull a packet from our receive queue and hand it to the user.
1900  *	If necessary we block.
1901  */
1902 
1903 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1904 			  struct msghdr *msg, size_t len, int flags)
1905 {
1906 	struct sock *sk = sock->sk;
1907 	struct sk_buff *skb;
1908 	int copied, err;
1909 	struct sockaddr_ll *sll;
1910 	int vnet_hdr_len = 0;
1911 
1912 	err = -EINVAL;
1913 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
1914 		goto out;
1915 
1916 #if 0
1917 	/* What error should we return now? EUNATTACH? */
1918 	if (pkt_sk(sk)->ifindex < 0)
1919 		return -ENODEV;
1920 #endif
1921 
1922 	if (flags & MSG_ERRQUEUE) {
1923 		err = packet_recv_error(sk, msg, len);
1924 		goto out;
1925 	}
1926 
1927 	/*
1928 	 *	Call the generic datagram receiver. This handles all sorts
1929 	 *	of horrible races and re-entrancy so we can forget about it
1930 	 *	in the protocol layers.
1931 	 *
1932 	 *	Now it will return ENETDOWN, if device have just gone down,
1933 	 *	but then it will block.
1934 	 */
1935 
1936 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
1937 
1938 	/*
1939 	 *	An error occurred so return it. Because skb_recv_datagram()
1940 	 *	handles the blocking we don't see and worry about blocking
1941 	 *	retries.
1942 	 */
1943 
1944 	if (skb == NULL)
1945 		goto out;
1946 
1947 	if (pkt_sk(sk)->has_vnet_hdr) {
1948 		struct virtio_net_hdr vnet_hdr = { 0 };
1949 
1950 		err = -EINVAL;
1951 		vnet_hdr_len = sizeof(vnet_hdr);
1952 		if (len < vnet_hdr_len)
1953 			goto out_free;
1954 
1955 		len -= vnet_hdr_len;
1956 
1957 		if (skb_is_gso(skb)) {
1958 			struct skb_shared_info *sinfo = skb_shinfo(skb);
1959 
1960 			/* This is a hint as to how much should be linear. */
1961 			vnet_hdr.hdr_len = skb_headlen(skb);
1962 			vnet_hdr.gso_size = sinfo->gso_size;
1963 			if (sinfo->gso_type & SKB_GSO_TCPV4)
1964 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1965 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
1966 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1967 			else if (sinfo->gso_type & SKB_GSO_UDP)
1968 				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1969 			else if (sinfo->gso_type & SKB_GSO_FCOE)
1970 				goto out_free;
1971 			else
1972 				BUG();
1973 			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1974 				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1975 		} else
1976 			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1977 
1978 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1979 			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1980 			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
1981 			vnet_hdr.csum_offset = skb->csum_offset;
1982 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1983 			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1984 		} /* else everything is zero */
1985 
1986 		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
1987 				     vnet_hdr_len);
1988 		if (err < 0)
1989 			goto out_free;
1990 	}
1991 
1992 	/*
1993 	 *	If the address length field is there to be filled in, we fill
1994 	 *	it in now.
1995 	 */
1996 
1997 	sll = &PACKET_SKB_CB(skb)->sa.ll;
1998 	if (sock->type == SOCK_PACKET)
1999 		msg->msg_namelen = sizeof(struct sockaddr_pkt);
2000 	else
2001 		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2002 
2003 	/*
2004 	 *	You lose any data beyond the buffer you gave. If it worries a
2005 	 *	user program they can ask the device for its MTU anyway.
2006 	 */
2007 
2008 	copied = skb->len;
2009 	if (copied > len) {
2010 		copied = len;
2011 		msg->msg_flags |= MSG_TRUNC;
2012 	}
2013 
2014 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2015 	if (err)
2016 		goto out_free;
2017 
2018 	sock_recv_ts_and_drops(msg, sk, skb);
2019 
2020 	if (msg->msg_name)
2021 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2022 		       msg->msg_namelen);
2023 
2024 	if (pkt_sk(sk)->auxdata) {
2025 		struct tpacket_auxdata aux;
2026 
2027 		aux.tp_status = TP_STATUS_USER;
2028 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2029 			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2030 		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2031 		aux.tp_snaplen = skb->len;
2032 		aux.tp_mac = 0;
2033 		aux.tp_net = skb_network_offset(skb);
2034 		if (vlan_tx_tag_present(skb)) {
2035 			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2036 			aux.tp_status |= TP_STATUS_VLAN_VALID;
2037 		} else {
2038 			aux.tp_vlan_tci = 0;
2039 		}
2040 		aux.tp_padding = 0;
2041 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
2042 	}
2043 
2044 	/*
2045 	 *	Free or return the buffer as appropriate. Again this
2046 	 *	hides all the races and re-entrancy issues from us.
2047 	 */
2048 	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
2049 
2050 out_free:
2051 	skb_free_datagram(sk, skb);
2052 out:
2053 	return err;
2054 }
2055 
2056 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2057 			       int *uaddr_len, int peer)
2058 {
2059 	struct net_device *dev;
2060 	struct sock *sk	= sock->sk;
2061 
2062 	if (peer)
2063 		return -EOPNOTSUPP;
2064 
2065 	uaddr->sa_family = AF_PACKET;
2066 	rcu_read_lock();
2067 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2068 	if (dev)
2069 		strncpy(uaddr->sa_data, dev->name, 14);
2070 	else
2071 		memset(uaddr->sa_data, 0, 14);
2072 	rcu_read_unlock();
2073 	*uaddr_len = sizeof(*uaddr);
2074 
2075 	return 0;
2076 }
2077 
2078 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2079 			  int *uaddr_len, int peer)
2080 {
2081 	struct net_device *dev;
2082 	struct sock *sk = sock->sk;
2083 	struct packet_sock *po = pkt_sk(sk);
2084 	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
2085 
2086 	if (peer)
2087 		return -EOPNOTSUPP;
2088 
2089 	sll->sll_family = AF_PACKET;
2090 	sll->sll_ifindex = po->ifindex;
2091 	sll->sll_protocol = po->num;
2092 	sll->sll_pkttype = 0;
2093 	rcu_read_lock();
2094 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
2095 	if (dev) {
2096 		sll->sll_hatype = dev->type;
2097 		sll->sll_halen = dev->addr_len;
2098 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
2099 	} else {
2100 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
2101 		sll->sll_halen = 0;
2102 	}
2103 	rcu_read_unlock();
2104 	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
2105 
2106 	return 0;
2107 }
2108 
2109 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2110 			 int what)
2111 {
2112 	switch (i->type) {
2113 	case PACKET_MR_MULTICAST:
2114 		if (i->alen != dev->addr_len)
2115 			return -EINVAL;
2116 		if (what > 0)
2117 			return dev_mc_add(dev, i->addr);
2118 		else
2119 			return dev_mc_del(dev, i->addr);
2120 		break;
2121 	case PACKET_MR_PROMISC:
2122 		return dev_set_promiscuity(dev, what);
2123 		break;
2124 	case PACKET_MR_ALLMULTI:
2125 		return dev_set_allmulti(dev, what);
2126 		break;
2127 	case PACKET_MR_UNICAST:
2128 		if (i->alen != dev->addr_len)
2129 			return -EINVAL;
2130 		if (what > 0)
2131 			return dev_uc_add(dev, i->addr);
2132 		else
2133 			return dev_uc_del(dev, i->addr);
2134 		break;
2135 	default:
2136 		break;
2137 	}
2138 	return 0;
2139 }
2140 
2141 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2142 {
2143 	for ( ; i; i = i->next) {
2144 		if (i->ifindex == dev->ifindex)
2145 			packet_dev_mc(dev, i, what);
2146 	}
2147 }
2148 
2149 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
2150 {
2151 	struct packet_sock *po = pkt_sk(sk);
2152 	struct packet_mclist *ml, *i;
2153 	struct net_device *dev;
2154 	int err;
2155 
2156 	rtnl_lock();
2157 
2158 	err = -ENODEV;
2159 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
2160 	if (!dev)
2161 		goto done;
2162 
2163 	err = -EINVAL;
2164 	if (mreq->mr_alen > dev->addr_len)
2165 		goto done;
2166 
2167 	err = -ENOBUFS;
2168 	i = kmalloc(sizeof(*i), GFP_KERNEL);
2169 	if (i == NULL)
2170 		goto done;
2171 
2172 	err = 0;
2173 	for (ml = po->mclist; ml; ml = ml->next) {
2174 		if (ml->ifindex == mreq->mr_ifindex &&
2175 		    ml->type == mreq->mr_type &&
2176 		    ml->alen == mreq->mr_alen &&
2177 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2178 			ml->count++;
2179 			/* Free the new element ... */
2180 			kfree(i);
2181 			goto done;
2182 		}
2183 	}
2184 
2185 	i->type = mreq->mr_type;
2186 	i->ifindex = mreq->mr_ifindex;
2187 	i->alen = mreq->mr_alen;
2188 	memcpy(i->addr, mreq->mr_address, i->alen);
2189 	i->count = 1;
2190 	i->next = po->mclist;
2191 	po->mclist = i;
2192 	err = packet_dev_mc(dev, i, 1);
2193 	if (err) {
2194 		po->mclist = i->next;
2195 		kfree(i);
2196 	}
2197 
2198 done:
2199 	rtnl_unlock();
2200 	return err;
2201 }
2202 
2203 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
2204 {
2205 	struct packet_mclist *ml, **mlp;
2206 
2207 	rtnl_lock();
2208 
2209 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2210 		if (ml->ifindex == mreq->mr_ifindex &&
2211 		    ml->type == mreq->mr_type &&
2212 		    ml->alen == mreq->mr_alen &&
2213 		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2214 			if (--ml->count == 0) {
2215 				struct net_device *dev;
2216 				*mlp = ml->next;
2217 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2218 				if (dev)
2219 					packet_dev_mc(dev, ml, -1);
2220 				kfree(ml);
2221 			}
2222 			rtnl_unlock();
2223 			return 0;
2224 		}
2225 	}
2226 	rtnl_unlock();
2227 	return -EADDRNOTAVAIL;
2228 }
2229 
2230 static void packet_flush_mclist(struct sock *sk)
2231 {
2232 	struct packet_sock *po = pkt_sk(sk);
2233 	struct packet_mclist *ml;
2234 
2235 	if (!po->mclist)
2236 		return;
2237 
2238 	rtnl_lock();
2239 	while ((ml = po->mclist) != NULL) {
2240 		struct net_device *dev;
2241 
2242 		po->mclist = ml->next;
2243 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2244 		if (dev != NULL)
2245 			packet_dev_mc(dev, ml, -1);
2246 		kfree(ml);
2247 	}
2248 	rtnl_unlock();
2249 }
2250 
2251 static int
2252 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2253 {
2254 	struct sock *sk = sock->sk;
2255 	struct packet_sock *po = pkt_sk(sk);
2256 	int ret;
2257 
2258 	if (level != SOL_PACKET)
2259 		return -ENOPROTOOPT;
2260 
2261 	switch (optname) {
2262 	case PACKET_ADD_MEMBERSHIP:
2263 	case PACKET_DROP_MEMBERSHIP:
2264 	{
2265 		struct packet_mreq_max mreq;
2266 		int len = optlen;
2267 		memset(&mreq, 0, sizeof(mreq));
2268 		if (len < sizeof(struct packet_mreq))
2269 			return -EINVAL;
2270 		if (len > sizeof(mreq))
2271 			len = sizeof(mreq);
2272 		if (copy_from_user(&mreq, optval, len))
2273 			return -EFAULT;
2274 		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
2275 			return -EINVAL;
2276 		if (optname == PACKET_ADD_MEMBERSHIP)
2277 			ret = packet_mc_add(sk, &mreq);
2278 		else
2279 			ret = packet_mc_drop(sk, &mreq);
2280 		return ret;
2281 	}
2282 
2283 	case PACKET_RX_RING:
2284 	case PACKET_TX_RING:
2285 	{
2286 		struct tpacket_req req;
2287 
2288 		if (optlen < sizeof(req))
2289 			return -EINVAL;
2290 		if (pkt_sk(sk)->has_vnet_hdr)
2291 			return -EINVAL;
2292 		if (copy_from_user(&req, optval, sizeof(req)))
2293 			return -EFAULT;
2294 		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
2295 	}
2296 	case PACKET_COPY_THRESH:
2297 	{
2298 		int val;
2299 
2300 		if (optlen != sizeof(val))
2301 			return -EINVAL;
2302 		if (copy_from_user(&val, optval, sizeof(val)))
2303 			return -EFAULT;
2304 
2305 		pkt_sk(sk)->copy_thresh = val;
2306 		return 0;
2307 	}
2308 	case PACKET_VERSION:
2309 	{
2310 		int val;
2311 
2312 		if (optlen != sizeof(val))
2313 			return -EINVAL;
2314 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2315 			return -EBUSY;
2316 		if (copy_from_user(&val, optval, sizeof(val)))
2317 			return -EFAULT;
2318 		switch (val) {
2319 		case TPACKET_V1:
2320 		case TPACKET_V2:
2321 			po->tp_version = val;
2322 			return 0;
2323 		default:
2324 			return -EINVAL;
2325 		}
2326 	}
2327 	case PACKET_RESERVE:
2328 	{
2329 		unsigned int val;
2330 
2331 		if (optlen != sizeof(val))
2332 			return -EINVAL;
2333 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2334 			return -EBUSY;
2335 		if (copy_from_user(&val, optval, sizeof(val)))
2336 			return -EFAULT;
2337 		po->tp_reserve = val;
2338 		return 0;
2339 	}
2340 	case PACKET_LOSS:
2341 	{
2342 		unsigned int val;
2343 
2344 		if (optlen != sizeof(val))
2345 			return -EINVAL;
2346 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2347 			return -EBUSY;
2348 		if (copy_from_user(&val, optval, sizeof(val)))
2349 			return -EFAULT;
2350 		po->tp_loss = !!val;
2351 		return 0;
2352 	}
2353 	case PACKET_AUXDATA:
2354 	{
2355 		int val;
2356 
2357 		if (optlen < sizeof(val))
2358 			return -EINVAL;
2359 		if (copy_from_user(&val, optval, sizeof(val)))
2360 			return -EFAULT;
2361 
2362 		po->auxdata = !!val;
2363 		return 0;
2364 	}
2365 	case PACKET_ORIGDEV:
2366 	{
2367 		int val;
2368 
2369 		if (optlen < sizeof(val))
2370 			return -EINVAL;
2371 		if (copy_from_user(&val, optval, sizeof(val)))
2372 			return -EFAULT;
2373 
2374 		po->origdev = !!val;
2375 		return 0;
2376 	}
2377 	case PACKET_VNET_HDR:
2378 	{
2379 		int val;
2380 
2381 		if (sock->type != SOCK_RAW)
2382 			return -EINVAL;
2383 		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2384 			return -EBUSY;
2385 		if (optlen < sizeof(val))
2386 			return -EINVAL;
2387 		if (copy_from_user(&val, optval, sizeof(val)))
2388 			return -EFAULT;
2389 
2390 		po->has_vnet_hdr = !!val;
2391 		return 0;
2392 	}
2393 	case PACKET_TIMESTAMP:
2394 	{
2395 		int val;
2396 
2397 		if (optlen != sizeof(val))
2398 			return -EINVAL;
2399 		if (copy_from_user(&val, optval, sizeof(val)))
2400 			return -EFAULT;
2401 
2402 		po->tp_tstamp = val;
2403 		return 0;
2404 	}
2405 	case PACKET_FANOUT:
2406 	{
2407 		int val;
2408 
2409 		if (optlen != sizeof(val))
2410 			return -EINVAL;
2411 		if (copy_from_user(&val, optval, sizeof(val)))
2412 			return -EFAULT;
2413 
2414 		return fanout_add(sk, val & 0xffff, val >> 16);
2415 	}
2416 	default:
2417 		return -ENOPROTOOPT;
2418 	}
2419 }
2420 
2421 static int packet_getsockopt(struct socket *sock, int level, int optname,
2422 			     char __user *optval, int __user *optlen)
2423 {
2424 	int len;
2425 	int val;
2426 	struct sock *sk = sock->sk;
2427 	struct packet_sock *po = pkt_sk(sk);
2428 	void *data;
2429 	struct tpacket_stats st;
2430 
2431 	if (level != SOL_PACKET)
2432 		return -ENOPROTOOPT;
2433 
2434 	if (get_user(len, optlen))
2435 		return -EFAULT;
2436 
2437 	if (len < 0)
2438 		return -EINVAL;
2439 
2440 	switch (optname) {
2441 	case PACKET_STATISTICS:
2442 		if (len > sizeof(struct tpacket_stats))
2443 			len = sizeof(struct tpacket_stats);
2444 		spin_lock_bh(&sk->sk_receive_queue.lock);
2445 		st = po->stats;
2446 		memset(&po->stats, 0, sizeof(st));
2447 		spin_unlock_bh(&sk->sk_receive_queue.lock);
2448 		st.tp_packets += st.tp_drops;
2449 
2450 		data = &st;
2451 		break;
2452 	case PACKET_AUXDATA:
2453 		if (len > sizeof(int))
2454 			len = sizeof(int);
2455 		val = po->auxdata;
2456 
2457 		data = &val;
2458 		break;
2459 	case PACKET_ORIGDEV:
2460 		if (len > sizeof(int))
2461 			len = sizeof(int);
2462 		val = po->origdev;
2463 
2464 		data = &val;
2465 		break;
2466 	case PACKET_VNET_HDR:
2467 		if (len > sizeof(int))
2468 			len = sizeof(int);
2469 		val = po->has_vnet_hdr;
2470 
2471 		data = &val;
2472 		break;
2473 	case PACKET_VERSION:
2474 		if (len > sizeof(int))
2475 			len = sizeof(int);
2476 		val = po->tp_version;
2477 		data = &val;
2478 		break;
2479 	case PACKET_HDRLEN:
2480 		if (len > sizeof(int))
2481 			len = sizeof(int);
2482 		if (copy_from_user(&val, optval, len))
2483 			return -EFAULT;
2484 		switch (val) {
2485 		case TPACKET_V1:
2486 			val = sizeof(struct tpacket_hdr);
2487 			break;
2488 		case TPACKET_V2:
2489 			val = sizeof(struct tpacket2_hdr);
2490 			break;
2491 		default:
2492 			return -EINVAL;
2493 		}
2494 		data = &val;
2495 		break;
2496 	case PACKET_RESERVE:
2497 		if (len > sizeof(unsigned int))
2498 			len = sizeof(unsigned int);
2499 		val = po->tp_reserve;
2500 		data = &val;
2501 		break;
2502 	case PACKET_LOSS:
2503 		if (len > sizeof(unsigned int))
2504 			len = sizeof(unsigned int);
2505 		val = po->tp_loss;
2506 		data = &val;
2507 		break;
2508 	case PACKET_TIMESTAMP:
2509 		if (len > sizeof(int))
2510 			len = sizeof(int);
2511 		val = po->tp_tstamp;
2512 		data = &val;
2513 		break;
2514 	case PACKET_FANOUT:
2515 		if (len > sizeof(int))
2516 			len = sizeof(int);
2517 		val = (po->fanout ?
2518 		       ((u32)po->fanout->id |
2519 			((u32)po->fanout->type << 16)) :
2520 		       0);
2521 		data = &val;
2522 		break;
2523 	default:
2524 		return -ENOPROTOOPT;
2525 	}
2526 
2527 	if (put_user(len, optlen))
2528 		return -EFAULT;
2529 	if (copy_to_user(optval, data, len))
2530 		return -EFAULT;
2531 	return 0;
2532 }
2533 
2534 
2535 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
2536 {
2537 	struct sock *sk;
2538 	struct hlist_node *node;
2539 	struct net_device *dev = data;
2540 	struct net *net = dev_net(dev);
2541 
2542 	rcu_read_lock();
2543 	sk_for_each_rcu(sk, node, &net->packet.sklist) {
2544 		struct packet_sock *po = pkt_sk(sk);
2545 
2546 		switch (msg) {
2547 		case NETDEV_UNREGISTER:
2548 			if (po->mclist)
2549 				packet_dev_mclist(dev, po->mclist, -1);
2550 			/* fallthrough */
2551 
2552 		case NETDEV_DOWN:
2553 			if (dev->ifindex == po->ifindex) {
2554 				spin_lock(&po->bind_lock);
2555 				if (po->running) {
2556 					__unregister_prot_hook(sk, false);
2557 					sk->sk_err = ENETDOWN;
2558 					if (!sock_flag(sk, SOCK_DEAD))
2559 						sk->sk_error_report(sk);
2560 				}
2561 				if (msg == NETDEV_UNREGISTER) {
2562 					po->ifindex = -1;
2563 					if (po->prot_hook.dev)
2564 						dev_put(po->prot_hook.dev);
2565 					po->prot_hook.dev = NULL;
2566 				}
2567 				spin_unlock(&po->bind_lock);
2568 			}
2569 			break;
2570 		case NETDEV_UP:
2571 			if (dev->ifindex == po->ifindex) {
2572 				spin_lock(&po->bind_lock);
2573 				if (po->num)
2574 					register_prot_hook(sk);
2575 				spin_unlock(&po->bind_lock);
2576 			}
2577 			break;
2578 		}
2579 	}
2580 	rcu_read_unlock();
2581 	return NOTIFY_DONE;
2582 }
2583 
2584 
2585 static int packet_ioctl(struct socket *sock, unsigned int cmd,
2586 			unsigned long arg)
2587 {
2588 	struct sock *sk = sock->sk;
2589 
2590 	switch (cmd) {
2591 	case SIOCOUTQ:
2592 	{
2593 		int amount = sk_wmem_alloc_get(sk);
2594 
2595 		return put_user(amount, (int __user *)arg);
2596 	}
2597 	case SIOCINQ:
2598 	{
2599 		struct sk_buff *skb;
2600 		int amount = 0;
2601 
2602 		spin_lock_bh(&sk->sk_receive_queue.lock);
2603 		skb = skb_peek(&sk->sk_receive_queue);
2604 		if (skb)
2605 			amount = skb->len;
2606 		spin_unlock_bh(&sk->sk_receive_queue.lock);
2607 		return put_user(amount, (int __user *)arg);
2608 	}
2609 	case SIOCGSTAMP:
2610 		return sock_get_timestamp(sk, (struct timeval __user *)arg);
2611 	case SIOCGSTAMPNS:
2612 		return sock_get_timestampns(sk, (struct timespec __user *)arg);
2613 
2614 #ifdef CONFIG_INET
2615 	case SIOCADDRT:
2616 	case SIOCDELRT:
2617 	case SIOCDARP:
2618 	case SIOCGARP:
2619 	case SIOCSARP:
2620 	case SIOCGIFADDR:
2621 	case SIOCSIFADDR:
2622 	case SIOCGIFBRDADDR:
2623 	case SIOCSIFBRDADDR:
2624 	case SIOCGIFNETMASK:
2625 	case SIOCSIFNETMASK:
2626 	case SIOCGIFDSTADDR:
2627 	case SIOCSIFDSTADDR:
2628 	case SIOCSIFFLAGS:
2629 		return inet_dgram_ops.ioctl(sock, cmd, arg);
2630 #endif
2631 
2632 	default:
2633 		return -ENOIOCTLCMD;
2634 	}
2635 	return 0;
2636 }
2637 
2638 static unsigned int packet_poll(struct file *file, struct socket *sock,
2639 				poll_table *wait)
2640 {
2641 	struct sock *sk = sock->sk;
2642 	struct packet_sock *po = pkt_sk(sk);
2643 	unsigned int mask = datagram_poll(file, sock, wait);
2644 
2645 	spin_lock_bh(&sk->sk_receive_queue.lock);
2646 	if (po->rx_ring.pg_vec) {
2647 		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
2648 			mask |= POLLIN | POLLRDNORM;
2649 	}
2650 	spin_unlock_bh(&sk->sk_receive_queue.lock);
2651 	spin_lock_bh(&sk->sk_write_queue.lock);
2652 	if (po->tx_ring.pg_vec) {
2653 		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
2654 			mask |= POLLOUT | POLLWRNORM;
2655 	}
2656 	spin_unlock_bh(&sk->sk_write_queue.lock);
2657 	return mask;
2658 }
2659 
2660 
2661 /* Dirty? Well, I still did not learn better way to account
2662  * for user mmaps.
2663  */
2664 
2665 static void packet_mm_open(struct vm_area_struct *vma)
2666 {
2667 	struct file *file = vma->vm_file;
2668 	struct socket *sock = file->private_data;
2669 	struct sock *sk = sock->sk;
2670 
2671 	if (sk)
2672 		atomic_inc(&pkt_sk(sk)->mapped);
2673 }
2674 
2675 static void packet_mm_close(struct vm_area_struct *vma)
2676 {
2677 	struct file *file = vma->vm_file;
2678 	struct socket *sock = file->private_data;
2679 	struct sock *sk = sock->sk;
2680 
2681 	if (sk)
2682 		atomic_dec(&pkt_sk(sk)->mapped);
2683 }
2684 
2685 static const struct vm_operations_struct packet_mmap_ops = {
2686 	.open	=	packet_mm_open,
2687 	.close	=	packet_mm_close,
2688 };
2689 
2690 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
2691 			unsigned int len)
2692 {
2693 	int i;
2694 
2695 	for (i = 0; i < len; i++) {
2696 		if (likely(pg_vec[i].buffer)) {
2697 			if (is_vmalloc_addr(pg_vec[i].buffer))
2698 				vfree(pg_vec[i].buffer);
2699 			else
2700 				free_pages((unsigned long)pg_vec[i].buffer,
2701 					   order);
2702 			pg_vec[i].buffer = NULL;
2703 		}
2704 	}
2705 	kfree(pg_vec);
2706 }
2707 
2708 static inline char *alloc_one_pg_vec_page(unsigned long order)
2709 {
2710 	char *buffer = NULL;
2711 	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
2712 			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
2713 
2714 	buffer = (char *) __get_free_pages(gfp_flags, order);
2715 
2716 	if (buffer)
2717 		return buffer;
2718 
2719 	/*
2720 	 * __get_free_pages failed, fall back to vmalloc
2721 	 */
2722 	buffer = vzalloc((1 << order) * PAGE_SIZE);
2723 
2724 	if (buffer)
2725 		return buffer;
2726 
2727 	/*
2728 	 * vmalloc failed, lets dig into swap here
2729 	 */
2730 	gfp_flags &= ~__GFP_NORETRY;
2731 	buffer = (char *)__get_free_pages(gfp_flags, order);
2732 	if (buffer)
2733 		return buffer;
2734 
2735 	/*
2736 	 * complete and utter failure
2737 	 */
2738 	return NULL;
2739 }
2740 
2741 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
2742 {
2743 	unsigned int block_nr = req->tp_block_nr;
2744 	struct pgv *pg_vec;
2745 	int i;
2746 
2747 	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
2748 	if (unlikely(!pg_vec))
2749 		goto out;
2750 
2751 	for (i = 0; i < block_nr; i++) {
2752 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
2753 		if (unlikely(!pg_vec[i].buffer))
2754 			goto out_free_pgvec;
2755 	}
2756 
2757 out:
2758 	return pg_vec;
2759 
2760 out_free_pgvec:
2761 	free_pg_vec(pg_vec, order, block_nr);
2762 	pg_vec = NULL;
2763 	goto out;
2764 }
2765 
2766 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2767 		int closing, int tx_ring)
2768 {
2769 	struct pgv *pg_vec = NULL;
2770 	struct packet_sock *po = pkt_sk(sk);
2771 	int was_running, order = 0;
2772 	struct packet_ring_buffer *rb;
2773 	struct sk_buff_head *rb_queue;
2774 	__be16 num;
2775 	int err;
2776 
2777 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
2778 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
2779 
2780 	err = -EBUSY;
2781 	if (!closing) {
2782 		if (atomic_read(&po->mapped))
2783 			goto out;
2784 		if (atomic_read(&rb->pending))
2785 			goto out;
2786 	}
2787 
2788 	if (req->tp_block_nr) {
2789 		/* Sanity tests and some calculations */
2790 		err = -EBUSY;
2791 		if (unlikely(rb->pg_vec))
2792 			goto out;
2793 
2794 		switch (po->tp_version) {
2795 		case TPACKET_V1:
2796 			po->tp_hdrlen = TPACKET_HDRLEN;
2797 			break;
2798 		case TPACKET_V2:
2799 			po->tp_hdrlen = TPACKET2_HDRLEN;
2800 			break;
2801 		}
2802 
2803 		err = -EINVAL;
2804 		if (unlikely((int)req->tp_block_size <= 0))
2805 			goto out;
2806 		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
2807 			goto out;
2808 		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
2809 					po->tp_reserve))
2810 			goto out;
2811 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
2812 			goto out;
2813 
2814 		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
2815 		if (unlikely(rb->frames_per_block <= 0))
2816 			goto out;
2817 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
2818 					req->tp_frame_nr))
2819 			goto out;
2820 
2821 		err = -ENOMEM;
2822 		order = get_order(req->tp_block_size);
2823 		pg_vec = alloc_pg_vec(req, order);
2824 		if (unlikely(!pg_vec))
2825 			goto out;
2826 	}
2827 	/* Done */
2828 	else {
2829 		err = -EINVAL;
2830 		if (unlikely(req->tp_frame_nr))
2831 			goto out;
2832 	}
2833 
2834 	lock_sock(sk);
2835 
2836 	/* Detach socket from network */
2837 	spin_lock(&po->bind_lock);
2838 	was_running = po->running;
2839 	num = po->num;
2840 	if (was_running) {
2841 		po->num = 0;
2842 		__unregister_prot_hook(sk, false);
2843 	}
2844 	spin_unlock(&po->bind_lock);
2845 
2846 	synchronize_net();
2847 
2848 	err = -EBUSY;
2849 	mutex_lock(&po->pg_vec_lock);
2850 	if (closing || atomic_read(&po->mapped) == 0) {
2851 		err = 0;
2852 		spin_lock_bh(&rb_queue->lock);
2853 		swap(rb->pg_vec, pg_vec);
2854 		rb->frame_max = (req->tp_frame_nr - 1);
2855 		rb->head = 0;
2856 		rb->frame_size = req->tp_frame_size;
2857 		spin_unlock_bh(&rb_queue->lock);
2858 
2859 		swap(rb->pg_vec_order, order);
2860 		swap(rb->pg_vec_len, req->tp_block_nr);
2861 
2862 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
2863 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
2864 						tpacket_rcv : packet_rcv;
2865 		skb_queue_purge(rb_queue);
2866 		if (atomic_read(&po->mapped))
2867 			pr_err("packet_mmap: vma is busy: %d\n",
2868 			       atomic_read(&po->mapped));
2869 	}
2870 	mutex_unlock(&po->pg_vec_lock);
2871 
2872 	spin_lock(&po->bind_lock);
2873 	if (was_running) {
2874 		po->num = num;
2875 		register_prot_hook(sk);
2876 	}
2877 	spin_unlock(&po->bind_lock);
2878 
2879 	release_sock(sk);
2880 
2881 	if (pg_vec)
2882 		free_pg_vec(pg_vec, order, req->tp_block_nr);
2883 out:
2884 	return err;
2885 }
2886 
2887 static int packet_mmap(struct file *file, struct socket *sock,
2888 		struct vm_area_struct *vma)
2889 {
2890 	struct sock *sk = sock->sk;
2891 	struct packet_sock *po = pkt_sk(sk);
2892 	unsigned long size, expected_size;
2893 	struct packet_ring_buffer *rb;
2894 	unsigned long start;
2895 	int err = -EINVAL;
2896 	int i;
2897 
2898 	if (vma->vm_pgoff)
2899 		return -EINVAL;
2900 
2901 	mutex_lock(&po->pg_vec_lock);
2902 
2903 	expected_size = 0;
2904 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
2905 		if (rb->pg_vec) {
2906 			expected_size += rb->pg_vec_len
2907 						* rb->pg_vec_pages
2908 						* PAGE_SIZE;
2909 		}
2910 	}
2911 
2912 	if (expected_size == 0)
2913 		goto out;
2914 
2915 	size = vma->vm_end - vma->vm_start;
2916 	if (size != expected_size)
2917 		goto out;
2918 
2919 	start = vma->vm_start;
2920 	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
2921 		if (rb->pg_vec == NULL)
2922 			continue;
2923 
2924 		for (i = 0; i < rb->pg_vec_len; i++) {
2925 			struct page *page;
2926 			void *kaddr = rb->pg_vec[i].buffer;
2927 			int pg_num;
2928 
2929 			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
2930 				page = pgv_to_page(kaddr);
2931 				err = vm_insert_page(vma, start, page);
2932 				if (unlikely(err))
2933 					goto out;
2934 				start += PAGE_SIZE;
2935 				kaddr += PAGE_SIZE;
2936 			}
2937 		}
2938 	}
2939 
2940 	atomic_inc(&po->mapped);
2941 	vma->vm_ops = &packet_mmap_ops;
2942 	err = 0;
2943 
2944 out:
2945 	mutex_unlock(&po->pg_vec_lock);
2946 	return err;
2947 }
2948 
2949 static const struct proto_ops packet_ops_spkt = {
2950 	.family =	PF_PACKET,
2951 	.owner =	THIS_MODULE,
2952 	.release =	packet_release,
2953 	.bind =		packet_bind_spkt,
2954 	.connect =	sock_no_connect,
2955 	.socketpair =	sock_no_socketpair,
2956 	.accept =	sock_no_accept,
2957 	.getname =	packet_getname_spkt,
2958 	.poll =		datagram_poll,
2959 	.ioctl =	packet_ioctl,
2960 	.listen =	sock_no_listen,
2961 	.shutdown =	sock_no_shutdown,
2962 	.setsockopt =	sock_no_setsockopt,
2963 	.getsockopt =	sock_no_getsockopt,
2964 	.sendmsg =	packet_sendmsg_spkt,
2965 	.recvmsg =	packet_recvmsg,
2966 	.mmap =		sock_no_mmap,
2967 	.sendpage =	sock_no_sendpage,
2968 };
2969 
2970 static const struct proto_ops packet_ops = {
2971 	.family =	PF_PACKET,
2972 	.owner =	THIS_MODULE,
2973 	.release =	packet_release,
2974 	.bind =		packet_bind,
2975 	.connect =	sock_no_connect,
2976 	.socketpair =	sock_no_socketpair,
2977 	.accept =	sock_no_accept,
2978 	.getname =	packet_getname,
2979 	.poll =		packet_poll,
2980 	.ioctl =	packet_ioctl,
2981 	.listen =	sock_no_listen,
2982 	.shutdown =	sock_no_shutdown,
2983 	.setsockopt =	packet_setsockopt,
2984 	.getsockopt =	packet_getsockopt,
2985 	.sendmsg =	packet_sendmsg,
2986 	.recvmsg =	packet_recvmsg,
2987 	.mmap =		packet_mmap,
2988 	.sendpage =	sock_no_sendpage,
2989 };
2990 
2991 static const struct net_proto_family packet_family_ops = {
2992 	.family =	PF_PACKET,
2993 	.create =	packet_create,
2994 	.owner	=	THIS_MODULE,
2995 };
2996 
2997 static struct notifier_block packet_netdev_notifier = {
2998 	.notifier_call =	packet_notifier,
2999 };
3000 
3001 #ifdef CONFIG_PROC_FS
3002 
3003 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
3004 	__acquires(RCU)
3005 {
3006 	struct net *net = seq_file_net(seq);
3007 
3008 	rcu_read_lock();
3009 	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
3010 }
3011 
3012 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3013 {
3014 	struct net *net = seq_file_net(seq);
3015 	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
3016 }
3017 
3018 static void packet_seq_stop(struct seq_file *seq, void *v)
3019 	__releases(RCU)
3020 {
3021 	rcu_read_unlock();
3022 }
3023 
3024 static int packet_seq_show(struct seq_file *seq, void *v)
3025 {
3026 	if (v == SEQ_START_TOKEN)
3027 		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
3028 	else {
3029 		struct sock *s = sk_entry(v);
3030 		const struct packet_sock *po = pkt_sk(s);
3031 
3032 		seq_printf(seq,
3033 			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
3034 			   s,
3035 			   atomic_read(&s->sk_refcnt),
3036 			   s->sk_type,
3037 			   ntohs(po->num),
3038 			   po->ifindex,
3039 			   po->running,
3040 			   atomic_read(&s->sk_rmem_alloc),
3041 			   sock_i_uid(s),
3042 			   sock_i_ino(s));
3043 	}
3044 
3045 	return 0;
3046 }
3047 
3048 static const struct seq_operations packet_seq_ops = {
3049 	.start	= packet_seq_start,
3050 	.next	= packet_seq_next,
3051 	.stop	= packet_seq_stop,
3052 	.show	= packet_seq_show,
3053 };
3054 
3055 static int packet_seq_open(struct inode *inode, struct file *file)
3056 {
3057 	return seq_open_net(inode, file, &packet_seq_ops,
3058 			    sizeof(struct seq_net_private));
3059 }
3060 
3061 static const struct file_operations packet_seq_fops = {
3062 	.owner		= THIS_MODULE,
3063 	.open		= packet_seq_open,
3064 	.read		= seq_read,
3065 	.llseek		= seq_lseek,
3066 	.release	= seq_release_net,
3067 };
3068 
3069 #endif
3070 
3071 static int __net_init packet_net_init(struct net *net)
3072 {
3073 	spin_lock_init(&net->packet.sklist_lock);
3074 	INIT_HLIST_HEAD(&net->packet.sklist);
3075 
3076 	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3077 		return -ENOMEM;
3078 
3079 	return 0;
3080 }
3081 
3082 static void __net_exit packet_net_exit(struct net *net)
3083 {
3084 	proc_net_remove(net, "packet");
3085 }
3086 
3087 static struct pernet_operations packet_net_ops = {
3088 	.init = packet_net_init,
3089 	.exit = packet_net_exit,
3090 };
3091 
3092 
3093 static void __exit packet_exit(void)
3094 {
3095 	unregister_netdevice_notifier(&packet_netdev_notifier);
3096 	unregister_pernet_subsys(&packet_net_ops);
3097 	sock_unregister(PF_PACKET);
3098 	proto_unregister(&packet_proto);
3099 }
3100 
3101 static int __init packet_init(void)
3102 {
3103 	int rc = proto_register(&packet_proto, 0);
3104 
3105 	if (rc != 0)
3106 		goto out;
3107 
3108 	sock_register(&packet_family_ops);
3109 	register_pernet_subsys(&packet_net_ops);
3110 	register_netdevice_notifier(&packet_netdev_notifier);
3111 out:
3112 	return rc;
3113 }
3114 
3115 module_init(packet_init);
3116 module_exit(packet_exit);
3117 MODULE_LICENSE("GPL");
3118 MODULE_ALIAS_NETPROTO(PF_PACKET);
3119