1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * PACKET - implements raw packet sockets. 8 * 9 * Authors: Ross Biro 10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Alan Cox, <gw4pts@gw4pts.ampr.org> 12 * 13 * Fixes: 14 * Alan Cox : verify_area() now used correctly 15 * Alan Cox : new skbuff lists, look ma no backlogs! 16 * Alan Cox : tidied skbuff lists. 17 * Alan Cox : Now uses generic datagram routines I 18 * added. Also fixed the peek/read crash 19 * from all old Linux datagram code. 20 * Alan Cox : Uses the improved datagram code. 21 * Alan Cox : Added NULL's for socket options. 22 * Alan Cox : Re-commented the code. 23 * Alan Cox : Use new kernel side addressing 24 * Rob Janssen : Correct MTU usage. 25 * Dave Platt : Counter leaks caused by incorrect 26 * interrupt locking and some slightly 27 * dubious gcc output. Can you read 28 * compiler: it said _VOLATILE_ 29 * Richard Kooijman : Timestamp fixes. 30 * Alan Cox : New buffers. Use sk->mac.raw. 31 * Alan Cox : sendmsg/recvmsg support. 32 * Alan Cox : Protocol setting support 33 * Alexey Kuznetsov : Untied from IPv4 stack. 34 * Cyrus Durgin : Fixed kerneld for kmod. 35 * Michal Ostrowski : Module initialization cleanup. 36 * Ulises Alonso : Frame number limit removal and 37 * packet_set_ring memory leak. 38 * Eric Biederman : Allow for > 8 byte hardware addresses. 39 * The convention is that longer addresses 40 * will simply extend the hardware address 41 * byte arrays at the end of sockaddr_ll 42 * and packet_mreq. 43 * Johann Baudy : Added TX RING. 44 * Chetan Loke : Implemented TPACKET_V3 block abstraction 45 * layer. 46 * Copyright (C) 2011, <lokec@ccs.neu.edu> 47 */ 48 49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 50 51 #include <linux/ethtool.h> 52 #include <linux/filter.h> 53 #include <linux/types.h> 54 #include <linux/mm.h> 55 #include <linux/capability.h> 56 #include <linux/fcntl.h> 57 #include <linux/socket.h> 58 #include <linux/in.h> 59 #include <linux/inet.h> 60 #include <linux/netdevice.h> 61 #include <linux/if_packet.h> 62 #include <linux/wireless.h> 63 #include <linux/kernel.h> 64 #include <linux/kmod.h> 65 #include <linux/slab.h> 66 #include <linux/vmalloc.h> 67 #include <net/net_namespace.h> 68 #include <net/ip.h> 69 #include <net/protocol.h> 70 #include <linux/skbuff.h> 71 #include <net/sock.h> 72 #include <linux/errno.h> 73 #include <linux/timer.h> 74 #include <linux/uaccess.h> 75 #include <asm/ioctls.h> 76 #include <asm/page.h> 77 #include <asm/cacheflush.h> 78 #include <asm/io.h> 79 #include <linux/proc_fs.h> 80 #include <linux/seq_file.h> 81 #include <linux/poll.h> 82 #include <linux/module.h> 83 #include <linux/init.h> 84 #include <linux/mutex.h> 85 #include <linux/if_vlan.h> 86 #include <linux/virtio_net.h> 87 #include <linux/errqueue.h> 88 #include <linux/net_tstamp.h> 89 #include <linux/percpu.h> 90 #ifdef CONFIG_INET 91 #include <net/inet_common.h> 92 #endif 93 #include <linux/bpf.h> 94 #include <net/compat.h> 95 #include <linux/netfilter_netdev.h> 96 97 #include "internal.h" 98 99 /* 100 Assumptions: 101 - If the device has no dev->header_ops->create, there is no LL header 102 visible above the device. In this case, its hard_header_len should be 0. 103 The device may prepend its own header internally. 
In this case, its 104 needed_headroom should be set to the space needed for it to add its 105 internal header. 106 For example, a WiFi driver pretending to be an Ethernet driver should 107 set its hard_header_len to be the Ethernet header length, and set its 108 needed_headroom to be (the real WiFi header length - the fake Ethernet 109 header length). 110 - packet socket receives packets with pulled ll header, 111 so that SOCK_RAW should push it back. 112 113 On receive: 114 ----------- 115 116 Incoming, dev_has_header(dev) == true 117 mac_header -> ll header 118 data -> data 119 120 Outgoing, dev_has_header(dev) == true 121 mac_header -> ll header 122 data -> ll header 123 124 Incoming, dev_has_header(dev) == false 125 mac_header -> data 126 However drivers often make it point to the ll header. 127 This is incorrect because the ll header should be invisible to us. 128 data -> data 129 130 Outgoing, dev_has_header(dev) == false 131 mac_header -> data. ll header is invisible to us. 132 data -> data 133 134 Resume 135 If dev_has_header(dev) == false we are unable to restore the ll header, 136 because it is invisible to us. 137 138 139 On transmit: 140 ------------ 141 142 dev_has_header(dev) == true 143 mac_header -> ll header 144 data -> ll header 145 146 dev_has_header(dev) == false (ll header is invisible to us) 147 mac_header -> data 148 data -> data 149 150 We should set network_header on output to the correct position, 151 packet classifier depends on it. 152 */ 153 154 /* Private packet socket structures. */ 155 156 /* identical to struct packet_mreq except it has 157 * a longer address field. 158 */ 159 struct packet_mreq_max { 160 int mr_ifindex; 161 unsigned short mr_type; 162 unsigned short mr_alen; 163 unsigned char mr_address[MAX_ADDR_LEN]; 164 }; 165 166 union tpacket_uhdr { 167 struct tpacket_hdr *h1; 168 struct tpacket2_hdr *h2; 169 struct tpacket3_hdr *h3; 170 void *raw; 171 }; 172 173 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 174 int closing, int tx_ring); 175 176 #define V3_ALIGNMENT (8) 177 178 #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT)) 179 180 #define BLK_PLUS_PRIV(sz_of_priv) \ 181 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) 182 183 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) 184 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) 185 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) 186 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len) 187 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num) 188 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) 189 190 struct packet_sock; 191 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 192 struct packet_type *pt, struct net_device *orig_dev); 193 194 static void *packet_previous_frame(struct packet_sock *po, 195 struct packet_ring_buffer *rb, 196 int status); 197 static void packet_increment_head(struct packet_ring_buffer *buff); 198 static int prb_curr_blk_in_use(struct tpacket_block_desc *); 199 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *, 200 struct packet_sock *); 201 static void prb_retire_current_block(struct tpacket_kbdq_core *, 202 struct packet_sock *, unsigned int status); 203 static int prb_queue_frozen(struct tpacket_kbdq_core *); 204 static void prb_open_block(struct tpacket_kbdq_core *, 205 struct tpacket_block_desc *); 206 static void prb_retire_rx_blk_timer_expired(struct timer_list *); 207 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *); 208 static void prb_fill_rxhash(struct 
tpacket_kbdq_core *, struct tpacket3_hdr *); 209 static void prb_clear_rxhash(struct tpacket_kbdq_core *, 210 struct tpacket3_hdr *); 211 static void prb_fill_vlan_info(struct tpacket_kbdq_core *, 212 struct tpacket3_hdr *); 213 static void packet_flush_mclist(struct sock *sk); 214 static u16 packet_pick_tx_queue(struct sk_buff *skb); 215 216 struct packet_skb_cb { 217 union { 218 struct sockaddr_pkt pkt; 219 union { 220 /* Trick: alias skb original length with 221 * ll.sll_family and ll.protocol in order 222 * to save room. 223 */ 224 unsigned int origlen; 225 struct sockaddr_ll ll; 226 }; 227 } sa; 228 }; 229 230 #define vio_le() virtio_legacy_is_little_endian() 231 232 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 233 234 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) 235 #define GET_PBLOCK_DESC(x, bid) \ 236 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) 237 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ 238 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) 239 #define GET_NEXT_PRB_BLK_NUM(x) \ 240 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ 241 ((x)->kactive_blk_num+1) : 0) 242 243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 244 static void __fanout_link(struct sock *sk, struct packet_sock *po); 245 246 #ifdef CONFIG_NETFILTER_EGRESS 247 static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb) 248 { 249 struct sk_buff *next, *head = NULL, *tail; 250 int rc; 251 252 rcu_read_lock(); 253 for (; skb != NULL; skb = next) { 254 next = skb->next; 255 skb_mark_not_on_list(skb); 256 257 if (!nf_hook_egress(skb, &rc, skb->dev)) 258 continue; 259 260 if (!head) 261 head = skb; 262 else 263 tail->next = skb; 264 265 tail = skb; 266 } 267 rcu_read_unlock(); 268 269 return head; 270 } 271 #endif 272 273 static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb) 274 { 275 if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS)) 276 return dev_queue_xmit(skb); 277 278 #ifdef CONFIG_NETFILTER_EGRESS 279 if (nf_hook_egress_active()) { 280 skb = nf_hook_direct_egress(skb); 281 if (!skb) 282 return NET_XMIT_DROP; 283 } 284 #endif 285 return dev_direct_xmit(skb, packet_pick_tx_queue(skb)); 286 } 287 288 static struct net_device *packet_cached_dev_get(struct packet_sock *po) 289 { 290 struct net_device *dev; 291 292 rcu_read_lock(); 293 dev = rcu_dereference(po->cached_dev); 294 dev_hold(dev); 295 rcu_read_unlock(); 296 297 return dev; 298 } 299 300 static void packet_cached_dev_assign(struct packet_sock *po, 301 struct net_device *dev) 302 { 303 rcu_assign_pointer(po->cached_dev, dev); 304 } 305 306 static void packet_cached_dev_reset(struct packet_sock *po) 307 { 308 RCU_INIT_POINTER(po->cached_dev, NULL); 309 } 310 311 static u16 packet_pick_tx_queue(struct sk_buff *skb) 312 { 313 struct net_device *dev = skb->dev; 314 const struct net_device_ops *ops = dev->netdev_ops; 315 int cpu = raw_smp_processor_id(); 316 u16 queue_index; 317 318 #ifdef CONFIG_XPS 319 skb->sender_cpu = cpu + 1; 320 #endif 321 skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues); 322 if (ops->ndo_select_queue) { 323 queue_index = ops->ndo_select_queue(dev, skb, NULL); 324 queue_index = netdev_cap_txqueue(dev, queue_index); 325 } else { 326 queue_index = netdev_pick_tx(dev, skb, NULL); 327 } 328 329 return queue_index; 330 } 331 332 /* __register_prot_hook must be invoked through register_prot_hook 333 * or from a context in which asynchronous accesses to the packet 334 * socket is not 
possible (packet_create()). 335 */ 336 static void __register_prot_hook(struct sock *sk) 337 { 338 struct packet_sock *po = pkt_sk(sk); 339 340 if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) { 341 if (po->fanout) 342 __fanout_link(sk, po); 343 else 344 dev_add_pack(&po->prot_hook); 345 346 sock_hold(sk); 347 packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1); 348 } 349 } 350 351 static void register_prot_hook(struct sock *sk) 352 { 353 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock); 354 __register_prot_hook(sk); 355 } 356 357 /* If the sync parameter is true, we will temporarily drop 358 * the po->bind_lock and do a synchronize_net to make sure no 359 * asynchronous packet processing paths still refer to the elements 360 * of po->prot_hook. If the sync parameter is false, it is the 361 * callers responsibility to take care of this. 362 */ 363 static void __unregister_prot_hook(struct sock *sk, bool sync) 364 { 365 struct packet_sock *po = pkt_sk(sk); 366 367 lockdep_assert_held_once(&po->bind_lock); 368 369 packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0); 370 371 if (po->fanout) 372 __fanout_unlink(sk, po); 373 else 374 __dev_remove_pack(&po->prot_hook); 375 376 __sock_put(sk); 377 378 if (sync) { 379 spin_unlock(&po->bind_lock); 380 synchronize_net(); 381 spin_lock(&po->bind_lock); 382 } 383 } 384 385 static void unregister_prot_hook(struct sock *sk, bool sync) 386 { 387 struct packet_sock *po = pkt_sk(sk); 388 389 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) 390 __unregister_prot_hook(sk, sync); 391 } 392 393 static inline struct page * __pure pgv_to_page(void *addr) 394 { 395 if (is_vmalloc_addr(addr)) 396 return vmalloc_to_page(addr); 397 return virt_to_page(addr); 398 } 399 400 static void __packet_set_status(struct packet_sock *po, void *frame, int status) 401 { 402 union tpacket_uhdr h; 403 404 /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */ 405 406 h.raw = frame; 407 switch (po->tp_version) { 408 case TPACKET_V1: 409 WRITE_ONCE(h.h1->tp_status, status); 410 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 411 break; 412 case TPACKET_V2: 413 WRITE_ONCE(h.h2->tp_status, status); 414 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 415 break; 416 case TPACKET_V3: 417 WRITE_ONCE(h.h3->tp_status, status); 418 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); 419 break; 420 default: 421 WARN(1, "TPACKET version not supported.\n"); 422 BUG(); 423 } 424 425 smp_wmb(); 426 } 427 428 static int __packet_get_status(const struct packet_sock *po, void *frame) 429 { 430 union tpacket_uhdr h; 431 432 smp_rmb(); 433 434 /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */ 435 436 h.raw = frame; 437 switch (po->tp_version) { 438 case TPACKET_V1: 439 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 440 return READ_ONCE(h.h1->tp_status); 441 case TPACKET_V2: 442 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 443 return READ_ONCE(h.h2->tp_status); 444 case TPACKET_V3: 445 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); 446 return READ_ONCE(h.h3->tp_status); 447 default: 448 WARN(1, "TPACKET version not supported.\n"); 449 BUG(); 450 return 0; 451 } 452 } 453 454 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts, 455 unsigned int flags) 456 { 457 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 458 459 if (shhwtstamps && 460 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && 461 ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts)) 462 return TP_STATUS_TS_RAW_HARDWARE; 463 464 if ((flags & SOF_TIMESTAMPING_SOFTWARE) && 
465 ktime_to_timespec64_cond(skb_tstamp(skb), ts)) 466 return TP_STATUS_TS_SOFTWARE; 467 468 return 0; 469 } 470 471 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, 472 struct sk_buff *skb) 473 { 474 union tpacket_uhdr h; 475 struct timespec64 ts; 476 __u32 ts_status; 477 478 if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp)))) 479 return 0; 480 481 h.raw = frame; 482 /* 483 * versions 1 through 3 overflow the timestamps in y2106, since they 484 * all store the seconds in a 32-bit unsigned integer. 485 * If we create a version 4, that should have a 64-bit timestamp, 486 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit 487 * nanoseconds. 488 */ 489 switch (po->tp_version) { 490 case TPACKET_V1: 491 h.h1->tp_sec = ts.tv_sec; 492 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 493 break; 494 case TPACKET_V2: 495 h.h2->tp_sec = ts.tv_sec; 496 h.h2->tp_nsec = ts.tv_nsec; 497 break; 498 case TPACKET_V3: 499 h.h3->tp_sec = ts.tv_sec; 500 h.h3->tp_nsec = ts.tv_nsec; 501 break; 502 default: 503 WARN(1, "TPACKET version not supported.\n"); 504 BUG(); 505 } 506 507 /* one flush is safe, as both fields always lie on the same cacheline */ 508 flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); 509 smp_wmb(); 510 511 return ts_status; 512 } 513 514 static void *packet_lookup_frame(const struct packet_sock *po, 515 const struct packet_ring_buffer *rb, 516 unsigned int position, 517 int status) 518 { 519 unsigned int pg_vec_pos, frame_offset; 520 union tpacket_uhdr h; 521 522 pg_vec_pos = position / rb->frames_per_block; 523 frame_offset = position % rb->frames_per_block; 524 525 h.raw = rb->pg_vec[pg_vec_pos].buffer + 526 (frame_offset * rb->frame_size); 527 528 if (status != __packet_get_status(po, h.raw)) 529 return NULL; 530 531 return h.raw; 532 } 533 534 static void *packet_current_frame(struct packet_sock *po, 535 struct packet_ring_buffer *rb, 536 int status) 537 { 538 return packet_lookup_frame(po, rb, rb->head, status); 539 } 540 541 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) 542 { 543 del_timer_sync(&pkc->retire_blk_timer); 544 } 545 546 static void prb_shutdown_retire_blk_timer(struct packet_sock *po, 547 struct sk_buff_head *rb_queue) 548 { 549 struct tpacket_kbdq_core *pkc; 550 551 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 552 553 spin_lock_bh(&rb_queue->lock); 554 pkc->delete_blk_timer = 1; 555 spin_unlock_bh(&rb_queue->lock); 556 557 prb_del_retire_blk_timer(pkc); 558 } 559 560 static void prb_setup_retire_blk_timer(struct packet_sock *po) 561 { 562 struct tpacket_kbdq_core *pkc; 563 564 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 565 timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired, 566 0); 567 pkc->retire_blk_timer.expires = jiffies; 568 } 569 570 static int prb_calc_retire_blk_tmo(struct packet_sock *po, 571 int blk_size_in_bytes) 572 { 573 struct net_device *dev; 574 unsigned int mbits, div; 575 struct ethtool_link_ksettings ecmd; 576 int err; 577 578 rtnl_lock(); 579 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); 580 if (unlikely(!dev)) { 581 rtnl_unlock(); 582 return DEFAULT_PRB_RETIRE_TOV; 583 } 584 err = __ethtool_get_link_ksettings(dev, &ecmd); 585 rtnl_unlock(); 586 if (err) 587 return DEFAULT_PRB_RETIRE_TOV; 588 589 /* If the link speed is so slow you don't really 590 * need to worry about perf anyways 591 */ 592 if (ecmd.base.speed < SPEED_1000 || 593 ecmd.base.speed == SPEED_UNKNOWN) 594 return DEFAULT_PRB_RETIRE_TOV; 595 596 div = ecmd.base.speed / 1000; 597 mbits = 
(blk_size_in_bytes * 8) / (1024 * 1024); 598 599 if (div) 600 mbits /= div; 601 602 if (div) 603 return mbits + 1; 604 return mbits; 605 } 606 607 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, 608 union tpacket_req_u *req_u) 609 { 610 p1->feature_req_word = req_u->req3.tp_feature_req_word; 611 } 612 613 static void init_prb_bdqc(struct packet_sock *po, 614 struct packet_ring_buffer *rb, 615 struct pgv *pg_vec, 616 union tpacket_req_u *req_u) 617 { 618 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); 619 struct tpacket_block_desc *pbd; 620 621 memset(p1, 0x0, sizeof(*p1)); 622 623 p1->knxt_seq_num = 1; 624 p1->pkbdq = pg_vec; 625 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; 626 p1->pkblk_start = pg_vec[0].buffer; 627 p1->kblk_size = req_u->req3.tp_block_size; 628 p1->knum_blocks = req_u->req3.tp_block_nr; 629 p1->hdrlen = po->tp_hdrlen; 630 p1->version = po->tp_version; 631 p1->last_kactive_blk_num = 0; 632 po->stats.stats3.tp_freeze_q_cnt = 0; 633 if (req_u->req3.tp_retire_blk_tov) 634 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; 635 else 636 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, 637 req_u->req3.tp_block_size); 638 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); 639 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; 640 rwlock_init(&p1->blk_fill_in_prog_lock); 641 642 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); 643 prb_init_ft_ops(p1, req_u); 644 prb_setup_retire_blk_timer(po); 645 prb_open_block(p1, pbd); 646 } 647 648 /* Do NOT update the last_blk_num first. 649 * Assumes sk_buff_head lock is held. 650 */ 651 static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) 652 { 653 mod_timer(&pkc->retire_blk_timer, 654 jiffies + pkc->tov_in_jiffies); 655 pkc->last_kactive_blk_num = pkc->kactive_blk_num; 656 } 657 658 /* 659 * Timer logic: 660 * 1) We refresh the timer only when we open a block. 661 * By doing this we don't waste cycles refreshing the timer 662 * on packet-by-packet basis. 663 * 664 * With a 1MB block-size, on a 1Gbps line, it will take 665 * i) ~8 ms to fill a block + ii) memcpy etc. 666 * In this cut we are not accounting for the memcpy time. 667 * 668 * So, if the user sets the 'tmo' to 10ms then the timer 669 * will never fire while the block is still getting filled 670 * (which is what we want). However, the user could choose 671 * to close a block early and that's fine. 672 * 673 * But when the timer does fire, we check whether or not to refresh it. 674 * Since the tmo granularity is in msecs, it is not too expensive 675 * to refresh the timer, lets say every '8' msecs. 676 * Either the user can set the 'tmo' or we can derive it based on 677 * a) line-speed and b) block-size. 678 * prb_calc_retire_blk_tmo() calculates the tmo. 679 * 680 */ 681 static void prb_retire_rx_blk_timer_expired(struct timer_list *t) 682 { 683 struct packet_sock *po = 684 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer); 685 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 686 unsigned int frozen; 687 struct tpacket_block_desc *pbd; 688 689 spin_lock(&po->sk.sk_receive_queue.lock); 690 691 frozen = prb_queue_frozen(pkc); 692 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 693 694 if (unlikely(pkc->delete_blk_timer)) 695 goto out; 696 697 /* We only need to plug the race when the block is partially filled. 698 * tpacket_rcv: 699 * lock(); increment BLOCK_NUM_PKTS; unlock() 700 * copy_bits() is in progress ... 
701 * timer fires on other cpu: 702 * we can't retire the current block because copy_bits 703 * is in progress. 704 * 705 */ 706 if (BLOCK_NUM_PKTS(pbd)) { 707 /* Waiting for skb_copy_bits to finish... */ 708 write_lock(&pkc->blk_fill_in_prog_lock); 709 write_unlock(&pkc->blk_fill_in_prog_lock); 710 } 711 712 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { 713 if (!frozen) { 714 if (!BLOCK_NUM_PKTS(pbd)) { 715 /* An empty block. Just refresh the timer. */ 716 goto refresh_timer; 717 } 718 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); 719 if (!prb_dispatch_next_block(pkc, po)) 720 goto refresh_timer; 721 else 722 goto out; 723 } else { 724 /* Case 1. Queue was frozen because user-space was 725 * lagging behind. 726 */ 727 if (prb_curr_blk_in_use(pbd)) { 728 /* 729 * Ok, user-space is still behind. 730 * So just refresh the timer. 731 */ 732 goto refresh_timer; 733 } else { 734 /* Case 2. queue was frozen,user-space caught up, 735 * now the link went idle && the timer fired. 736 * We don't have a block to close.So we open this 737 * block and restart the timer. 738 * opening a block thaws the queue,restarts timer 739 * Thawing/timer-refresh is a side effect. 740 */ 741 prb_open_block(pkc, pbd); 742 goto out; 743 } 744 } 745 } 746 747 refresh_timer: 748 _prb_refresh_rx_retire_blk_timer(pkc); 749 750 out: 751 spin_unlock(&po->sk.sk_receive_queue.lock); 752 } 753 754 static void prb_flush_block(struct tpacket_kbdq_core *pkc1, 755 struct tpacket_block_desc *pbd1, __u32 status) 756 { 757 /* Flush everything minus the block header */ 758 759 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 760 u8 *start, *end; 761 762 start = (u8 *)pbd1; 763 764 /* Skip the block header(we know header WILL fit in 4K) */ 765 start += PAGE_SIZE; 766 767 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); 768 for (; start < end; start += PAGE_SIZE) 769 flush_dcache_page(pgv_to_page(start)); 770 771 smp_wmb(); 772 #endif 773 774 /* Now update the block status. */ 775 776 BLOCK_STATUS(pbd1) = status; 777 778 /* Flush the block header */ 779 780 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 781 start = (u8 *)pbd1; 782 flush_dcache_page(pgv_to_page(start)); 783 784 smp_wmb(); 785 #endif 786 } 787 788 /* 789 * Side effect: 790 * 791 * 1) flush the block 792 * 2) Increment active_blk_num 793 * 794 * Note:We DONT refresh the timer on purpose. 795 * Because almost always the next block will be opened. 796 */ 797 static void prb_close_block(struct tpacket_kbdq_core *pkc1, 798 struct tpacket_block_desc *pbd1, 799 struct packet_sock *po, unsigned int stat) 800 { 801 __u32 status = TP_STATUS_USER | stat; 802 803 struct tpacket3_hdr *last_pkt; 804 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 805 struct sock *sk = &po->sk; 806 807 if (atomic_read(&po->tp_drops)) 808 status |= TP_STATUS_LOSING; 809 810 last_pkt = (struct tpacket3_hdr *)pkc1->prev; 811 last_pkt->tp_next_offset = 0; 812 813 /* Get the ts of the last pkt */ 814 if (BLOCK_NUM_PKTS(pbd1)) { 815 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; 816 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; 817 } else { 818 /* Ok, we tmo'd - so get the current time. 819 * 820 * It shouldn't really happen as we don't close empty 821 * blocks. See prb_retire_rx_blk_timer_expired(). 
822 */ 823 struct timespec64 ts; 824 ktime_get_real_ts64(&ts); 825 h1->ts_last_pkt.ts_sec = ts.tv_sec; 826 h1->ts_last_pkt.ts_nsec = ts.tv_nsec; 827 } 828 829 smp_wmb(); 830 831 /* Flush the block */ 832 prb_flush_block(pkc1, pbd1, status); 833 834 sk->sk_data_ready(sk); 835 836 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); 837 } 838 839 static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) 840 { 841 pkc->reset_pending_on_curr_blk = 0; 842 } 843 844 /* 845 * Side effect of opening a block: 846 * 847 * 1) prb_queue is thawed. 848 * 2) retire_blk_timer is refreshed. 849 * 850 */ 851 static void prb_open_block(struct tpacket_kbdq_core *pkc1, 852 struct tpacket_block_desc *pbd1) 853 { 854 struct timespec64 ts; 855 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 856 857 smp_rmb(); 858 859 /* We could have just memset this but we will lose the 860 * flexibility of making the priv area sticky 861 */ 862 863 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; 864 BLOCK_NUM_PKTS(pbd1) = 0; 865 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 866 867 ktime_get_real_ts64(&ts); 868 869 h1->ts_first_pkt.ts_sec = ts.tv_sec; 870 h1->ts_first_pkt.ts_nsec = ts.tv_nsec; 871 872 pkc1->pkblk_start = (char *)pbd1; 873 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 874 875 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); 876 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; 877 878 pbd1->version = pkc1->version; 879 pkc1->prev = pkc1->nxt_offset; 880 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; 881 882 prb_thaw_queue(pkc1); 883 _prb_refresh_rx_retire_blk_timer(pkc1); 884 885 smp_wmb(); 886 } 887 888 /* 889 * Queue freeze logic: 890 * 1) Assume tp_block_nr = 8 blocks. 891 * 2) At time 't0', user opens Rx ring. 892 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 893 * 4) user-space is either sleeping or processing block '0'. 894 * 5) tpacket_rcv is currently filling block '7', since there is no space left, 895 * it will close block-7,loop around and try to fill block '0'. 896 * call-flow: 897 * __packet_lookup_frame_in_block 898 * prb_retire_current_block() 899 * prb_dispatch_next_block() 900 * |->(BLOCK_STATUS == USER) evaluates to true 901 * 5.1) Since block-0 is currently in-use, we just freeze the queue. 902 * 6) Now there are two cases: 903 * 6.1) Link goes idle right after the queue is frozen. 904 * But remember, the last open_block() refreshed the timer. 905 * When this timer expires,it will refresh itself so that we can 906 * re-open block-0 in near future. 907 * 6.2) Link is busy and keeps on receiving packets. This is a simple 908 * case and __packet_lookup_frame_in_block will check if block-0 909 * is free and can now be re-used. 910 */ 911 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, 912 struct packet_sock *po) 913 { 914 pkc->reset_pending_on_curr_blk = 1; 915 po->stats.stats3.tp_freeze_q_cnt++; 916 } 917 918 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) 919 920 /* 921 * If the next block is free then we will dispatch it 922 * and return a good offset. 923 * Else, we will freeze the queue. 924 * So, caller must check the return value. 925 */ 926 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, 927 struct packet_sock *po) 928 { 929 struct tpacket_block_desc *pbd; 930 931 smp_rmb(); 932 933 /* 1. Get current block num */ 934 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 935 936 /* 2. 
If this block is currently in_use then freeze the queue */ 937 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { 938 prb_freeze_queue(pkc, po); 939 return NULL; 940 } 941 942 /* 943 * 3. 944 * open this block and return the offset where the first packet 945 * needs to get stored. 946 */ 947 prb_open_block(pkc, pbd); 948 return (void *)pkc->nxt_offset; 949 } 950 951 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, 952 struct packet_sock *po, unsigned int status) 953 { 954 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 955 956 /* retire/close the current block */ 957 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { 958 /* 959 * Plug the case where copy_bits() is in progress on 960 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't 961 * have space to copy the pkt in the current block and 962 * called prb_retire_current_block() 963 * 964 * We don't need to worry about the TMO case because 965 * the timer-handler already handled this case. 966 */ 967 if (!(status & TP_STATUS_BLK_TMO)) { 968 /* Waiting for skb_copy_bits to finish... */ 969 write_lock(&pkc->blk_fill_in_prog_lock); 970 write_unlock(&pkc->blk_fill_in_prog_lock); 971 } 972 prb_close_block(pkc, pbd, po, status); 973 return; 974 } 975 } 976 977 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd) 978 { 979 return TP_STATUS_USER & BLOCK_STATUS(pbd); 980 } 981 982 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) 983 { 984 return pkc->reset_pending_on_curr_blk; 985 } 986 987 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) 988 __releases(&pkc->blk_fill_in_prog_lock) 989 { 990 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 991 992 read_unlock(&pkc->blk_fill_in_prog_lock); 993 } 994 995 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, 996 struct tpacket3_hdr *ppd) 997 { 998 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); 999 } 1000 1001 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, 1002 struct tpacket3_hdr *ppd) 1003 { 1004 ppd->hv1.tp_rxhash = 0; 1005 } 1006 1007 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, 1008 struct tpacket3_hdr *ppd) 1009 { 1010 if (skb_vlan_tag_present(pkc->skb)) { 1011 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); 1012 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); 1013 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 1014 } else { 1015 ppd->hv1.tp_vlan_tci = 0; 1016 ppd->hv1.tp_vlan_tpid = 0; 1017 ppd->tp_status = TP_STATUS_AVAILABLE; 1018 } 1019 } 1020 1021 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, 1022 struct tpacket3_hdr *ppd) 1023 { 1024 ppd->hv1.tp_padding = 0; 1025 prb_fill_vlan_info(pkc, ppd); 1026 1027 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) 1028 prb_fill_rxhash(pkc, ppd); 1029 else 1030 prb_clear_rxhash(pkc, ppd); 1031 } 1032 1033 static void prb_fill_curr_block(char *curr, 1034 struct tpacket_kbdq_core *pkc, 1035 struct tpacket_block_desc *pbd, 1036 unsigned int len) 1037 __acquires(&pkc->blk_fill_in_prog_lock) 1038 { 1039 struct tpacket3_hdr *ppd; 1040 1041 ppd = (struct tpacket3_hdr *)curr; 1042 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); 1043 pkc->prev = curr; 1044 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); 1045 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); 1046 BLOCK_NUM_PKTS(pbd) += 1; 1047 read_lock(&pkc->blk_fill_in_prog_lock); 1048 prb_run_all_ft_ops(pkc, ppd); 1049 } 1050 1051 /* Assumes caller has the sk->rx_queue.lock */ 1052 static void *__packet_lookup_frame_in_block(struct packet_sock 
*po, 1053 struct sk_buff *skb, 1054 unsigned int len 1055 ) 1056 { 1057 struct tpacket_kbdq_core *pkc; 1058 struct tpacket_block_desc *pbd; 1059 char *curr, *end; 1060 1061 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 1062 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1063 1064 /* Queue is frozen when user space is lagging behind */ 1065 if (prb_queue_frozen(pkc)) { 1066 /* 1067 * Check if that last block which caused the queue to freeze, 1068 * is still in_use by user-space. 1069 */ 1070 if (prb_curr_blk_in_use(pbd)) { 1071 /* Can't record this packet */ 1072 return NULL; 1073 } else { 1074 /* 1075 * Ok, the block was released by user-space. 1076 * Now let's open that block. 1077 * opening a block also thaws the queue. 1078 * Thawing is a side effect. 1079 */ 1080 prb_open_block(pkc, pbd); 1081 } 1082 } 1083 1084 smp_mb(); 1085 curr = pkc->nxt_offset; 1086 pkc->skb = skb; 1087 end = (char *)pbd + pkc->kblk_size; 1088 1089 /* first try the current block */ 1090 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { 1091 prb_fill_curr_block(curr, pkc, pbd, len); 1092 return (void *)curr; 1093 } 1094 1095 /* Ok, close the current block */ 1096 prb_retire_current_block(pkc, po, 0); 1097 1098 /* Now, try to dispatch the next block */ 1099 curr = (char *)prb_dispatch_next_block(pkc, po); 1100 if (curr) { 1101 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1102 prb_fill_curr_block(curr, pkc, pbd, len); 1103 return (void *)curr; 1104 } 1105 1106 /* 1107 * No free blocks are available.user_space hasn't caught up yet. 1108 * Queue was just frozen and now this packet will get dropped. 1109 */ 1110 return NULL; 1111 } 1112 1113 static void *packet_current_rx_frame(struct packet_sock *po, 1114 struct sk_buff *skb, 1115 int status, unsigned int len) 1116 { 1117 char *curr = NULL; 1118 switch (po->tp_version) { 1119 case TPACKET_V1: 1120 case TPACKET_V2: 1121 curr = packet_lookup_frame(po, &po->rx_ring, 1122 po->rx_ring.head, status); 1123 return curr; 1124 case TPACKET_V3: 1125 return __packet_lookup_frame_in_block(po, skb, len); 1126 default: 1127 WARN(1, "TPACKET version not supported\n"); 1128 BUG(); 1129 return NULL; 1130 } 1131 } 1132 1133 static void *prb_lookup_block(const struct packet_sock *po, 1134 const struct packet_ring_buffer *rb, 1135 unsigned int idx, 1136 int status) 1137 { 1138 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 1139 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); 1140 1141 if (status != BLOCK_STATUS(pbd)) 1142 return NULL; 1143 return pbd; 1144 } 1145 1146 static int prb_previous_blk_num(struct packet_ring_buffer *rb) 1147 { 1148 unsigned int prev; 1149 if (rb->prb_bdqc.kactive_blk_num) 1150 prev = rb->prb_bdqc.kactive_blk_num-1; 1151 else 1152 prev = rb->prb_bdqc.knum_blocks-1; 1153 return prev; 1154 } 1155 1156 /* Assumes caller has held the rx_queue.lock */ 1157 static void *__prb_previous_block(struct packet_sock *po, 1158 struct packet_ring_buffer *rb, 1159 int status) 1160 { 1161 unsigned int previous = prb_previous_blk_num(rb); 1162 return prb_lookup_block(po, rb, previous, status); 1163 } 1164 1165 static void *packet_previous_rx_frame(struct packet_sock *po, 1166 struct packet_ring_buffer *rb, 1167 int status) 1168 { 1169 if (po->tp_version <= TPACKET_V2) 1170 return packet_previous_frame(po, rb, status); 1171 1172 return __prb_previous_block(po, rb, status); 1173 } 1174 1175 static void packet_increment_rx_head(struct packet_sock *po, 1176 struct packet_ring_buffer *rb) 1177 { 1178 switch (po->tp_version) { 1179 case TPACKET_V1: 1180 case TPACKET_V2: 1181 return 
packet_increment_head(rb); 1182 case TPACKET_V3: 1183 default: 1184 WARN(1, "TPACKET version not supported.\n"); 1185 BUG(); 1186 return; 1187 } 1188 } 1189 1190 static void *packet_previous_frame(struct packet_sock *po, 1191 struct packet_ring_buffer *rb, 1192 int status) 1193 { 1194 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; 1195 return packet_lookup_frame(po, rb, previous, status); 1196 } 1197 1198 static void packet_increment_head(struct packet_ring_buffer *buff) 1199 { 1200 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 1201 } 1202 1203 static void packet_inc_pending(struct packet_ring_buffer *rb) 1204 { 1205 this_cpu_inc(*rb->pending_refcnt); 1206 } 1207 1208 static void packet_dec_pending(struct packet_ring_buffer *rb) 1209 { 1210 this_cpu_dec(*rb->pending_refcnt); 1211 } 1212 1213 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) 1214 { 1215 unsigned int refcnt = 0; 1216 int cpu; 1217 1218 /* We don't use pending refcount in rx_ring. */ 1219 if (rb->pending_refcnt == NULL) 1220 return 0; 1221 1222 for_each_possible_cpu(cpu) 1223 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); 1224 1225 return refcnt; 1226 } 1227 1228 static int packet_alloc_pending(struct packet_sock *po) 1229 { 1230 po->rx_ring.pending_refcnt = NULL; 1231 1232 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); 1233 if (unlikely(po->tx_ring.pending_refcnt == NULL)) 1234 return -ENOBUFS; 1235 1236 return 0; 1237 } 1238 1239 static void packet_free_pending(struct packet_sock *po) 1240 { 1241 free_percpu(po->tx_ring.pending_refcnt); 1242 } 1243 1244 #define ROOM_POW_OFF 2 1245 #define ROOM_NONE 0x0 1246 #define ROOM_LOW 0x1 1247 #define ROOM_NORMAL 0x2 1248 1249 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off) 1250 { 1251 int idx, len; 1252 1253 len = READ_ONCE(po->rx_ring.frame_max) + 1; 1254 idx = READ_ONCE(po->rx_ring.head); 1255 if (pow_off) 1256 idx += len >> pow_off; 1257 if (idx >= len) 1258 idx -= len; 1259 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1260 } 1261 1262 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off) 1263 { 1264 int idx, len; 1265 1266 len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks); 1267 idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num); 1268 if (pow_off) 1269 idx += len >> pow_off; 1270 if (idx >= len) 1271 idx -= len; 1272 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1273 } 1274 1275 static int __packet_rcv_has_room(const struct packet_sock *po, 1276 const struct sk_buff *skb) 1277 { 1278 const struct sock *sk = &po->sk; 1279 int ret = ROOM_NONE; 1280 1281 if (po->prot_hook.func != tpacket_rcv) { 1282 int rcvbuf = READ_ONCE(sk->sk_rcvbuf); 1283 int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc) 1284 - (skb ? 
skb->truesize : 0); 1285 1286 if (avail > (rcvbuf >> ROOM_POW_OFF)) 1287 return ROOM_NORMAL; 1288 else if (avail > 0) 1289 return ROOM_LOW; 1290 else 1291 return ROOM_NONE; 1292 } 1293 1294 if (po->tp_version == TPACKET_V3) { 1295 if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) 1296 ret = ROOM_NORMAL; 1297 else if (__tpacket_v3_has_room(po, 0)) 1298 ret = ROOM_LOW; 1299 } else { 1300 if (__tpacket_has_room(po, ROOM_POW_OFF)) 1301 ret = ROOM_NORMAL; 1302 else if (__tpacket_has_room(po, 0)) 1303 ret = ROOM_LOW; 1304 } 1305 1306 return ret; 1307 } 1308 1309 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) 1310 { 1311 bool pressure; 1312 int ret; 1313 1314 ret = __packet_rcv_has_room(po, skb); 1315 pressure = ret != ROOM_NORMAL; 1316 1317 if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure) 1318 packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure); 1319 1320 return ret; 1321 } 1322 1323 static void packet_rcv_try_clear_pressure(struct packet_sock *po) 1324 { 1325 if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) && 1326 __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) 1327 packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false); 1328 } 1329 1330 static void packet_sock_destruct(struct sock *sk) 1331 { 1332 skb_queue_purge(&sk->sk_error_queue); 1333 1334 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 1335 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); 1336 1337 if (!sock_flag(sk, SOCK_DEAD)) { 1338 pr_err("Attempt to release alive packet socket: %p\n", sk); 1339 return; 1340 } 1341 } 1342 1343 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) 1344 { 1345 u32 *history = po->rollover->history; 1346 u32 victim, rxhash; 1347 int i, count = 0; 1348 1349 rxhash = skb_get_hash(skb); 1350 for (i = 0; i < ROLLOVER_HLEN; i++) 1351 if (READ_ONCE(history[i]) == rxhash) 1352 count++; 1353 1354 victim = get_random_u32_below(ROLLOVER_HLEN); 1355 1356 /* Avoid dirtying the cache line if possible */ 1357 if (READ_ONCE(history[victim]) != rxhash) 1358 WRITE_ONCE(history[victim], rxhash); 1359 1360 return count > (ROLLOVER_HLEN >> 1); 1361 } 1362 1363 static unsigned int fanout_demux_hash(struct packet_fanout *f, 1364 struct sk_buff *skb, 1365 unsigned int num) 1366 { 1367 return reciprocal_scale(__skb_get_hash_symmetric(skb), num); 1368 } 1369 1370 static unsigned int fanout_demux_lb(struct packet_fanout *f, 1371 struct sk_buff *skb, 1372 unsigned int num) 1373 { 1374 unsigned int val = atomic_inc_return(&f->rr_cur); 1375 1376 return val % num; 1377 } 1378 1379 static unsigned int fanout_demux_cpu(struct packet_fanout *f, 1380 struct sk_buff *skb, 1381 unsigned int num) 1382 { 1383 return smp_processor_id() % num; 1384 } 1385 1386 static unsigned int fanout_demux_rnd(struct packet_fanout *f, 1387 struct sk_buff *skb, 1388 unsigned int num) 1389 { 1390 return get_random_u32_below(num); 1391 } 1392 1393 static unsigned int fanout_demux_rollover(struct packet_fanout *f, 1394 struct sk_buff *skb, 1395 unsigned int idx, bool try_self, 1396 unsigned int num) 1397 { 1398 struct packet_sock *po, *po_next, *po_skip = NULL; 1399 unsigned int i, j, room = ROOM_NONE; 1400 1401 po = pkt_sk(rcu_dereference(f->arr[idx])); 1402 1403 if (try_self) { 1404 room = packet_rcv_has_room(po, skb); 1405 if (room == ROOM_NORMAL || 1406 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) 1407 return idx; 1408 po_skip = po; 1409 } 1410 1411 i = j = min_t(int, po->rollover->sock, num - 1); 1412 do { 1413 po_next = pkt_sk(rcu_dereference(f->arr[i])); 1414 if (po_next != po_skip && 1415 
!packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) && 1416 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { 1417 if (i != j) 1418 po->rollover->sock = i; 1419 atomic_long_inc(&po->rollover->num); 1420 if (room == ROOM_LOW) 1421 atomic_long_inc(&po->rollover->num_huge); 1422 return i; 1423 } 1424 1425 if (++i == num) 1426 i = 0; 1427 } while (i != j); 1428 1429 atomic_long_inc(&po->rollover->num_failed); 1430 return idx; 1431 } 1432 1433 static unsigned int fanout_demux_qm(struct packet_fanout *f, 1434 struct sk_buff *skb, 1435 unsigned int num) 1436 { 1437 return skb_get_queue_mapping(skb) % num; 1438 } 1439 1440 static unsigned int fanout_demux_bpf(struct packet_fanout *f, 1441 struct sk_buff *skb, 1442 unsigned int num) 1443 { 1444 struct bpf_prog *prog; 1445 unsigned int ret = 0; 1446 1447 rcu_read_lock(); 1448 prog = rcu_dereference(f->bpf_prog); 1449 if (prog) 1450 ret = bpf_prog_run_clear_cb(prog, skb) % num; 1451 rcu_read_unlock(); 1452 1453 return ret; 1454 } 1455 1456 static bool fanout_has_flag(struct packet_fanout *f, u16 flag) 1457 { 1458 return f->flags & (flag >> 8); 1459 } 1460 1461 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1462 struct packet_type *pt, struct net_device *orig_dev) 1463 { 1464 struct packet_fanout *f = pt->af_packet_priv; 1465 unsigned int num = READ_ONCE(f->num_members); 1466 struct net *net = read_pnet(&f->net); 1467 struct packet_sock *po; 1468 unsigned int idx; 1469 1470 if (!net_eq(dev_net(dev), net) || !num) { 1471 kfree_skb(skb); 1472 return 0; 1473 } 1474 1475 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { 1476 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); 1477 if (!skb) 1478 return 0; 1479 } 1480 switch (f->type) { 1481 case PACKET_FANOUT_HASH: 1482 default: 1483 idx = fanout_demux_hash(f, skb, num); 1484 break; 1485 case PACKET_FANOUT_LB: 1486 idx = fanout_demux_lb(f, skb, num); 1487 break; 1488 case PACKET_FANOUT_CPU: 1489 idx = fanout_demux_cpu(f, skb, num); 1490 break; 1491 case PACKET_FANOUT_RND: 1492 idx = fanout_demux_rnd(f, skb, num); 1493 break; 1494 case PACKET_FANOUT_QM: 1495 idx = fanout_demux_qm(f, skb, num); 1496 break; 1497 case PACKET_FANOUT_ROLLOVER: 1498 idx = fanout_demux_rollover(f, skb, 0, false, num); 1499 break; 1500 case PACKET_FANOUT_CBPF: 1501 case PACKET_FANOUT_EBPF: 1502 idx = fanout_demux_bpf(f, skb, num); 1503 break; 1504 } 1505 1506 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) 1507 idx = fanout_demux_rollover(f, skb, idx, true, num); 1508 1509 po = pkt_sk(rcu_dereference(f->arr[idx])); 1510 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); 1511 } 1512 1513 DEFINE_MUTEX(fanout_mutex); 1514 EXPORT_SYMBOL_GPL(fanout_mutex); 1515 static LIST_HEAD(fanout_list); 1516 static u16 fanout_next_id; 1517 1518 static void __fanout_link(struct sock *sk, struct packet_sock *po) 1519 { 1520 struct packet_fanout *f = po->fanout; 1521 1522 spin_lock(&f->lock); 1523 rcu_assign_pointer(f->arr[f->num_members], sk); 1524 smp_wmb(); 1525 f->num_members++; 1526 if (f->num_members == 1) 1527 dev_add_pack(&f->prot_hook); 1528 spin_unlock(&f->lock); 1529 } 1530 1531 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) 1532 { 1533 struct packet_fanout *f = po->fanout; 1534 int i; 1535 1536 spin_lock(&f->lock); 1537 for (i = 0; i < f->num_members; i++) { 1538 if (rcu_dereference_protected(f->arr[i], 1539 lockdep_is_held(&f->lock)) == sk) 1540 break; 1541 } 1542 BUG_ON(i >= f->num_members); 1543 rcu_assign_pointer(f->arr[i], 1544 
rcu_dereference_protected(f->arr[f->num_members - 1], 1545 lockdep_is_held(&f->lock))); 1546 f->num_members--; 1547 if (f->num_members == 0) 1548 __dev_remove_pack(&f->prot_hook); 1549 spin_unlock(&f->lock); 1550 } 1551 1552 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) 1553 { 1554 if (sk->sk_family != PF_PACKET) 1555 return false; 1556 1557 return ptype->af_packet_priv == pkt_sk(sk)->fanout; 1558 } 1559 1560 static void fanout_init_data(struct packet_fanout *f) 1561 { 1562 switch (f->type) { 1563 case PACKET_FANOUT_LB: 1564 atomic_set(&f->rr_cur, 0); 1565 break; 1566 case PACKET_FANOUT_CBPF: 1567 case PACKET_FANOUT_EBPF: 1568 RCU_INIT_POINTER(f->bpf_prog, NULL); 1569 break; 1570 } 1571 } 1572 1573 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) 1574 { 1575 struct bpf_prog *old; 1576 1577 spin_lock(&f->lock); 1578 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); 1579 rcu_assign_pointer(f->bpf_prog, new); 1580 spin_unlock(&f->lock); 1581 1582 if (old) { 1583 synchronize_net(); 1584 bpf_prog_destroy(old); 1585 } 1586 } 1587 1588 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, 1589 unsigned int len) 1590 { 1591 struct bpf_prog *new; 1592 struct sock_fprog fprog; 1593 int ret; 1594 1595 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1596 return -EPERM; 1597 1598 ret = copy_bpf_fprog_from_user(&fprog, data, len); 1599 if (ret) 1600 return ret; 1601 1602 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); 1603 if (ret) 1604 return ret; 1605 1606 __fanout_set_data_bpf(po->fanout, new); 1607 return 0; 1608 } 1609 1610 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data, 1611 unsigned int len) 1612 { 1613 struct bpf_prog *new; 1614 u32 fd; 1615 1616 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1617 return -EPERM; 1618 if (len != sizeof(fd)) 1619 return -EINVAL; 1620 if (copy_from_sockptr(&fd, data, len)) 1621 return -EFAULT; 1622 1623 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 1624 if (IS_ERR(new)) 1625 return PTR_ERR(new); 1626 1627 __fanout_set_data_bpf(po->fanout, new); 1628 return 0; 1629 } 1630 1631 static int fanout_set_data(struct packet_sock *po, sockptr_t data, 1632 unsigned int len) 1633 { 1634 switch (po->fanout->type) { 1635 case PACKET_FANOUT_CBPF: 1636 return fanout_set_data_cbpf(po, data, len); 1637 case PACKET_FANOUT_EBPF: 1638 return fanout_set_data_ebpf(po, data, len); 1639 default: 1640 return -EINVAL; 1641 } 1642 } 1643 1644 static void fanout_release_data(struct packet_fanout *f) 1645 { 1646 switch (f->type) { 1647 case PACKET_FANOUT_CBPF: 1648 case PACKET_FANOUT_EBPF: 1649 __fanout_set_data_bpf(f, NULL); 1650 } 1651 } 1652 1653 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) 1654 { 1655 struct packet_fanout *f; 1656 1657 list_for_each_entry(f, &fanout_list, list) { 1658 if (f->id == candidate_id && 1659 read_pnet(&f->net) == sock_net(sk)) { 1660 return false; 1661 } 1662 } 1663 return true; 1664 } 1665 1666 static bool fanout_find_new_id(struct sock *sk, u16 *new_id) 1667 { 1668 u16 id = fanout_next_id; 1669 1670 do { 1671 if (__fanout_id_is_free(sk, id)) { 1672 *new_id = id; 1673 fanout_next_id = id + 1; 1674 return true; 1675 } 1676 1677 id++; 1678 } while (id != fanout_next_id); 1679 1680 return false; 1681 } 1682 1683 static int fanout_add(struct sock *sk, struct fanout_args *args) 1684 { 1685 struct packet_rollover *rollover = NULL; 1686 struct packet_sock *po = pkt_sk(sk); 1687 u16 type_flags = 
args->type_flags; 1688 struct packet_fanout *f, *match; 1689 u8 type = type_flags & 0xff; 1690 u8 flags = type_flags >> 8; 1691 u16 id = args->id; 1692 int err; 1693 1694 switch (type) { 1695 case PACKET_FANOUT_ROLLOVER: 1696 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) 1697 return -EINVAL; 1698 break; 1699 case PACKET_FANOUT_HASH: 1700 case PACKET_FANOUT_LB: 1701 case PACKET_FANOUT_CPU: 1702 case PACKET_FANOUT_RND: 1703 case PACKET_FANOUT_QM: 1704 case PACKET_FANOUT_CBPF: 1705 case PACKET_FANOUT_EBPF: 1706 break; 1707 default: 1708 return -EINVAL; 1709 } 1710 1711 mutex_lock(&fanout_mutex); 1712 1713 err = -EALREADY; 1714 if (po->fanout) 1715 goto out; 1716 1717 if (type == PACKET_FANOUT_ROLLOVER || 1718 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { 1719 err = -ENOMEM; 1720 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); 1721 if (!rollover) 1722 goto out; 1723 atomic_long_set(&rollover->num, 0); 1724 atomic_long_set(&rollover->num_huge, 0); 1725 atomic_long_set(&rollover->num_failed, 0); 1726 } 1727 1728 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { 1729 if (id != 0) { 1730 err = -EINVAL; 1731 goto out; 1732 } 1733 if (!fanout_find_new_id(sk, &id)) { 1734 err = -ENOMEM; 1735 goto out; 1736 } 1737 /* ephemeral flag for the first socket in the group: drop it */ 1738 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); 1739 } 1740 1741 match = NULL; 1742 list_for_each_entry(f, &fanout_list, list) { 1743 if (f->id == id && 1744 read_pnet(&f->net) == sock_net(sk)) { 1745 match = f; 1746 break; 1747 } 1748 } 1749 err = -EINVAL; 1750 if (match) { 1751 if (match->flags != flags) 1752 goto out; 1753 if (args->max_num_members && 1754 args->max_num_members != match->max_num_members) 1755 goto out; 1756 } else { 1757 if (args->max_num_members > PACKET_FANOUT_MAX) 1758 goto out; 1759 if (!args->max_num_members) 1760 /* legacy PACKET_FANOUT_MAX */ 1761 args->max_num_members = 256; 1762 err = -ENOMEM; 1763 match = kvzalloc(struct_size(match, arr, args->max_num_members), 1764 GFP_KERNEL); 1765 if (!match) 1766 goto out; 1767 write_pnet(&match->net, sock_net(sk)); 1768 match->id = id; 1769 match->type = type; 1770 match->flags = flags; 1771 INIT_LIST_HEAD(&match->list); 1772 spin_lock_init(&match->lock); 1773 refcount_set(&match->sk_ref, 0); 1774 fanout_init_data(match); 1775 match->prot_hook.type = po->prot_hook.type; 1776 match->prot_hook.dev = po->prot_hook.dev; 1777 match->prot_hook.func = packet_rcv_fanout; 1778 match->prot_hook.af_packet_priv = match; 1779 match->prot_hook.af_packet_net = read_pnet(&match->net); 1780 match->prot_hook.id_match = match_fanout_group; 1781 match->max_num_members = args->max_num_members; 1782 match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING; 1783 list_add(&match->list, &fanout_list); 1784 } 1785 err = -EINVAL; 1786 1787 spin_lock(&po->bind_lock); 1788 if (packet_sock_flag(po, PACKET_SOCK_RUNNING) && 1789 match->type == type && 1790 match->prot_hook.type == po->prot_hook.type && 1791 match->prot_hook.dev == po->prot_hook.dev) { 1792 err = -ENOSPC; 1793 if (refcount_read(&match->sk_ref) < match->max_num_members) { 1794 __dev_remove_pack(&po->prot_hook); 1795 1796 /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */ 1797 WRITE_ONCE(po->fanout, match); 1798 1799 po->rollover = rollover; 1800 rollover = NULL; 1801 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); 1802 __fanout_link(sk, po); 1803 err = 0; 1804 } 1805 } 1806 spin_unlock(&po->bind_lock); 1807 1808 if (err && !refcount_read(&match->sk_ref)) { 1809 
list_del(&match->list); 1810 kvfree(match); 1811 } 1812 1813 out: 1814 kfree(rollover); 1815 mutex_unlock(&fanout_mutex); 1816 return err; 1817 } 1818 1819 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes 1820 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. 1821 * It is the responsibility of the caller to call fanout_release_data() and 1822 * free the returned packet_fanout (after synchronize_net()) 1823 */ 1824 static struct packet_fanout *fanout_release(struct sock *sk) 1825 { 1826 struct packet_sock *po = pkt_sk(sk); 1827 struct packet_fanout *f; 1828 1829 mutex_lock(&fanout_mutex); 1830 f = po->fanout; 1831 if (f) { 1832 po->fanout = NULL; 1833 1834 if (refcount_dec_and_test(&f->sk_ref)) 1835 list_del(&f->list); 1836 else 1837 f = NULL; 1838 } 1839 mutex_unlock(&fanout_mutex); 1840 1841 return f; 1842 } 1843 1844 static bool packet_extra_vlan_len_allowed(const struct net_device *dev, 1845 struct sk_buff *skb) 1846 { 1847 /* Earlier code assumed this would be a VLAN pkt, double-check 1848 * this now that we have the actual packet in hand. We can only 1849 * do this check on Ethernet devices. 1850 */ 1851 if (unlikely(dev->type != ARPHRD_ETHER)) 1852 return false; 1853 1854 skb_reset_mac_header(skb); 1855 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); 1856 } 1857 1858 static const struct proto_ops packet_ops; 1859 1860 static const struct proto_ops packet_ops_spkt; 1861 1862 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, 1863 struct packet_type *pt, struct net_device *orig_dev) 1864 { 1865 struct sock *sk; 1866 struct sockaddr_pkt *spkt; 1867 1868 /* 1869 * When we registered the protocol we saved the socket in the data 1870 * field for just this event. 1871 */ 1872 1873 sk = pt->af_packet_priv; 1874 1875 /* 1876 * Yank back the headers [hope the device set this 1877 * right or kerboom...] 1878 * 1879 * Incoming packets have ll header pulled, 1880 * push it back. 1881 * 1882 * For outgoing ones skb->data == skb_mac_header(skb) 1883 * so that this procedure is noop. 1884 */ 1885 1886 if (skb->pkt_type == PACKET_LOOPBACK) 1887 goto out; 1888 1889 if (!net_eq(dev_net(dev), sock_net(sk))) 1890 goto out; 1891 1892 skb = skb_share_check(skb, GFP_ATOMIC); 1893 if (skb == NULL) 1894 goto oom; 1895 1896 /* drop any routing info */ 1897 skb_dst_drop(skb); 1898 1899 /* drop conntrack reference */ 1900 nf_reset_ct(skb); 1901 1902 spkt = &PACKET_SKB_CB(skb)->sa.pkt; 1903 1904 skb_push(skb, skb->data - skb_mac_header(skb)); 1905 1906 /* 1907 * The SOCK_PACKET socket receives _all_ frames. 1908 */ 1909 1910 spkt->spkt_family = dev->type; 1911 strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); 1912 spkt->spkt_protocol = skb->protocol; 1913 1914 /* 1915 * Charge the memory to the socket. This is done specifically 1916 * to prevent sockets using all the memory up. 
1917 */ 1918 1919 if (sock_queue_rcv_skb(sk, skb) == 0) 1920 return 0; 1921 1922 out: 1923 kfree_skb(skb); 1924 oom: 1925 return 0; 1926 } 1927 1928 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) 1929 { 1930 int depth; 1931 1932 if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) && 1933 sock->type == SOCK_RAW) { 1934 skb_reset_mac_header(skb); 1935 skb->protocol = dev_parse_header_protocol(skb); 1936 } 1937 1938 /* Move network header to the right position for VLAN tagged packets */ 1939 if (likely(skb->dev->type == ARPHRD_ETHER) && 1940 eth_type_vlan(skb->protocol) && 1941 vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0) 1942 skb_set_network_header(skb, depth); 1943 1944 skb_probe_transport_header(skb); 1945 } 1946 1947 /* 1948 * Output a raw packet to a device layer. This bypasses all the other 1949 * protocol layers and you must therefore supply it with a complete frame 1950 */ 1951 1952 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, 1953 size_t len) 1954 { 1955 struct sock *sk = sock->sk; 1956 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); 1957 struct sk_buff *skb = NULL; 1958 struct net_device *dev; 1959 struct sockcm_cookie sockc; 1960 __be16 proto = 0; 1961 int err; 1962 int extra_len = 0; 1963 1964 /* 1965 * Get and verify the address. 1966 */ 1967 1968 if (saddr) { 1969 if (msg->msg_namelen < sizeof(struct sockaddr)) 1970 return -EINVAL; 1971 if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) 1972 proto = saddr->spkt_protocol; 1973 } else 1974 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ 1975 1976 /* 1977 * Find the device first to size check it 1978 */ 1979 1980 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; 1981 retry: 1982 rcu_read_lock(); 1983 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 1984 err = -ENODEV; 1985 if (dev == NULL) 1986 goto out_unlock; 1987 1988 err = -ENETDOWN; 1989 if (!(dev->flags & IFF_UP)) 1990 goto out_unlock; 1991 1992 /* 1993 * You may not queue a frame bigger than the mtu. This is the lowest level 1994 * raw protocol and you must do your own fragmentation at this level. 1995 */ 1996 1997 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 1998 if (!netif_supports_nofcs(dev)) { 1999 err = -EPROTONOSUPPORT; 2000 goto out_unlock; 2001 } 2002 extra_len = 4; /* We're doing our own CRC */ 2003 } 2004 2005 err = -EMSGSIZE; 2006 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) 2007 goto out_unlock; 2008 2009 if (!skb) { 2010 size_t reserved = LL_RESERVED_SPACE(dev); 2011 int tlen = dev->needed_tailroom; 2012 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; 2013 2014 rcu_read_unlock(); 2015 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); 2016 if (skb == NULL) 2017 return -ENOBUFS; 2018 /* FIXME: Save some space for broken drivers that write a hard 2019 * header at transmission time by themselves. PPP is the notable 2020 * one here. This should really be fixed at the driver level. 
2021 */ 2022 skb_reserve(skb, reserved); 2023 skb_reset_network_header(skb); 2024 2025 /* Try to align data part correctly */ 2026 if (hhlen) { 2027 skb->data -= hhlen; 2028 skb->tail -= hhlen; 2029 if (len < hhlen) 2030 skb_reset_network_header(skb); 2031 } 2032 err = memcpy_from_msg(skb_put(skb, len), msg, len); 2033 if (err) 2034 goto out_free; 2035 goto retry; 2036 } 2037 2038 if (!dev_validate_header(dev, skb->data, len) || !skb->len) { 2039 err = -EINVAL; 2040 goto out_unlock; 2041 } 2042 if (len > (dev->mtu + dev->hard_header_len + extra_len) && 2043 !packet_extra_vlan_len_allowed(dev, skb)) { 2044 err = -EMSGSIZE; 2045 goto out_unlock; 2046 } 2047 2048 sockcm_init(&sockc, sk); 2049 if (msg->msg_controllen) { 2050 err = sock_cmsg_send(sk, msg, &sockc); 2051 if (unlikely(err)) 2052 goto out_unlock; 2053 } 2054 2055 skb->protocol = proto; 2056 skb->dev = dev; 2057 skb->priority = READ_ONCE(sk->sk_priority); 2058 skb->mark = READ_ONCE(sk->sk_mark); 2059 skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid); 2060 skb_setup_tx_timestamp(skb, sockc.tsflags); 2061 2062 if (unlikely(extra_len == 4)) 2063 skb->no_fcs = 1; 2064 2065 packet_parse_headers(skb, sock); 2066 2067 dev_queue_xmit(skb); 2068 rcu_read_unlock(); 2069 return len; 2070 2071 out_unlock: 2072 rcu_read_unlock(); 2073 out_free: 2074 kfree_skb(skb); 2075 return err; 2076 } 2077 2078 static unsigned int run_filter(struct sk_buff *skb, 2079 const struct sock *sk, 2080 unsigned int res) 2081 { 2082 struct sk_filter *filter; 2083 2084 rcu_read_lock(); 2085 filter = rcu_dereference(sk->sk_filter); 2086 if (filter != NULL) 2087 res = bpf_prog_run_clear_cb(filter->prog, skb); 2088 rcu_read_unlock(); 2089 2090 return res; 2091 } 2092 2093 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, 2094 size_t *len, int vnet_hdr_sz) 2095 { 2096 struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 }; 2097 2098 if (*len < vnet_hdr_sz) 2099 return -EINVAL; 2100 *len -= vnet_hdr_sz; 2101 2102 if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0)) 2103 return -EINVAL; 2104 2105 return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz); 2106 } 2107 2108 /* 2109 * This function makes lazy skb cloning in hope that most of packets 2110 * are discarded by BPF. 2111 * 2112 * Note tricky part: we DO mangle shared skb! skb->data, skb->len 2113 * and skb->cb are mangled. It works because (and until) packets 2114 * falling here are owned by current CPU. Output packets are cloned 2115 * by dev_queue_xmit_nit(), input packets are processed by net_bh 2116 * sequentially, so that if we return skb to original state on exit, 2117 * we will not harm anyone. 2118 */ 2119 2120 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, 2121 struct packet_type *pt, struct net_device *orig_dev) 2122 { 2123 enum skb_drop_reason drop_reason = SKB_CONSUMED; 2124 struct sock *sk = NULL; 2125 struct sockaddr_ll *sll; 2126 struct packet_sock *po; 2127 u8 *skb_head = skb->data; 2128 int skb_len = skb->len; 2129 unsigned int snaplen, res; 2130 2131 if (skb->pkt_type == PACKET_LOOPBACK) 2132 goto drop; 2133 2134 sk = pt->af_packet_priv; 2135 po = pkt_sk(sk); 2136 2137 if (!net_eq(dev_net(dev), sock_net(sk))) 2138 goto drop; 2139 2140 skb->dev = dev; 2141 2142 if (dev_has_header(dev)) { 2143 /* The device has an explicit notion of ll header, 2144 * exported to higher levels. 
2145 * 2146 * Otherwise, the device hides details of its frame 2147 * structure, so that corresponding packet head is 2148 * never delivered to user. 2149 */ 2150 if (sk->sk_type != SOCK_DGRAM) 2151 skb_push(skb, skb->data - skb_mac_header(skb)); 2152 else if (skb->pkt_type == PACKET_OUTGOING) { 2153 /* Special case: outgoing packets have ll header at head */ 2154 skb_pull(skb, skb_network_offset(skb)); 2155 } 2156 } 2157 2158 snaplen = skb->len; 2159 2160 res = run_filter(skb, sk, snaplen); 2161 if (!res) 2162 goto drop_n_restore; 2163 if (snaplen > res) 2164 snaplen = res; 2165 2166 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 2167 goto drop_n_acct; 2168 2169 if (skb_shared(skb)) { 2170 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2171 if (nskb == NULL) 2172 goto drop_n_acct; 2173 2174 if (skb_head != skb->data) { 2175 skb->data = skb_head; 2176 skb->len = skb_len; 2177 } 2178 consume_skb(skb); 2179 skb = nskb; 2180 } 2181 2182 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); 2183 2184 sll = &PACKET_SKB_CB(skb)->sa.ll; 2185 sll->sll_hatype = dev->type; 2186 sll->sll_pkttype = skb->pkt_type; 2187 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) 2188 sll->sll_ifindex = orig_dev->ifindex; 2189 else 2190 sll->sll_ifindex = dev->ifindex; 2191 2192 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2193 2194 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). 2195 * Use their space for storing the original skb length. 2196 */ 2197 PACKET_SKB_CB(skb)->sa.origlen = skb->len; 2198 2199 if (pskb_trim(skb, snaplen)) 2200 goto drop_n_acct; 2201 2202 skb_set_owner_r(skb, sk); 2203 skb->dev = NULL; 2204 skb_dst_drop(skb); 2205 2206 /* drop conntrack reference */ 2207 nf_reset_ct(skb); 2208 2209 spin_lock(&sk->sk_receive_queue.lock); 2210 po->stats.stats1.tp_packets++; 2211 sock_skb_set_dropcount(sk, skb); 2212 skb_clear_delivery_time(skb); 2213 __skb_queue_tail(&sk->sk_receive_queue, skb); 2214 spin_unlock(&sk->sk_receive_queue.lock); 2215 sk->sk_data_ready(sk); 2216 return 0; 2217 2218 drop_n_acct: 2219 atomic_inc(&po->tp_drops); 2220 atomic_inc(&sk->sk_drops); 2221 drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; 2222 2223 drop_n_restore: 2224 if (skb_head != skb->data && skb_shared(skb)) { 2225 skb->data = skb_head; 2226 skb->len = skb_len; 2227 } 2228 drop: 2229 sk_skb_reason_drop(sk, skb, drop_reason); 2230 return 0; 2231 } 2232 2233 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 2234 struct packet_type *pt, struct net_device *orig_dev) 2235 { 2236 enum skb_drop_reason drop_reason = SKB_CONSUMED; 2237 struct sock *sk = NULL; 2238 struct packet_sock *po; 2239 struct sockaddr_ll *sll; 2240 union tpacket_uhdr h; 2241 u8 *skb_head = skb->data; 2242 int skb_len = skb->len; 2243 unsigned int snaplen, res; 2244 unsigned long status = TP_STATUS_USER; 2245 unsigned short macoff, hdrlen; 2246 unsigned int netoff; 2247 struct sk_buff *copy_skb = NULL; 2248 struct timespec64 ts; 2249 __u32 ts_status; 2250 unsigned int slot_id = 0; 2251 int vnet_hdr_sz = 0; 2252 2253 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 2254 * We may add members to them until current aligned size without forcing 2255 * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
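 *
 * Illustrative user-space query, should a program want the exact
 * header size anyway ("fd" is a placeholder descriptor):
 *
 *	int val = TPACKET_V2;
 *	socklen_t len = sizeof(val);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &val, &len);
 *
 * On return, val holds sizeof(struct tpacket2_hdr) for that
 * version (see the PACKET_HDRLEN case in packet_getsockopt()).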
2256 */ 2257 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); 2258 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); 2259 2260 if (skb->pkt_type == PACKET_LOOPBACK) 2261 goto drop; 2262 2263 sk = pt->af_packet_priv; 2264 po = pkt_sk(sk); 2265 2266 if (!net_eq(dev_net(dev), sock_net(sk))) 2267 goto drop; 2268 2269 if (dev_has_header(dev)) { 2270 if (sk->sk_type != SOCK_DGRAM) 2271 skb_push(skb, skb->data - skb_mac_header(skb)); 2272 else if (skb->pkt_type == PACKET_OUTGOING) { 2273 /* Special case: outgoing packets have ll header at head */ 2274 skb_pull(skb, skb_network_offset(skb)); 2275 } 2276 } 2277 2278 snaplen = skb->len; 2279 2280 res = run_filter(skb, sk, snaplen); 2281 if (!res) 2282 goto drop_n_restore; 2283 2284 /* If we are flooded, just give up */ 2285 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) { 2286 atomic_inc(&po->tp_drops); 2287 goto drop_n_restore; 2288 } 2289 2290 if (skb->ip_summed == CHECKSUM_PARTIAL) 2291 status |= TP_STATUS_CSUMNOTREADY; 2292 else if (skb->pkt_type != PACKET_OUTGOING && 2293 skb_csum_unnecessary(skb)) 2294 status |= TP_STATUS_CSUM_VALID; 2295 if (skb_is_gso(skb) && skb_is_gso_tcp(skb)) 2296 status |= TP_STATUS_GSO_TCP; 2297 2298 if (snaplen > res) 2299 snaplen = res; 2300 2301 if (sk->sk_type == SOCK_DGRAM) { 2302 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 2303 po->tp_reserve; 2304 } else { 2305 unsigned int maclen = skb_network_offset(skb); 2306 netoff = TPACKET_ALIGN(po->tp_hdrlen + 2307 (maclen < 16 ? 16 : maclen)) + 2308 po->tp_reserve; 2309 vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); 2310 if (vnet_hdr_sz) 2311 netoff += vnet_hdr_sz; 2312 macoff = netoff - maclen; 2313 } 2314 if (netoff > USHRT_MAX) { 2315 atomic_inc(&po->tp_drops); 2316 goto drop_n_restore; 2317 } 2318 if (po->tp_version <= TPACKET_V2) { 2319 if (macoff + snaplen > po->rx_ring.frame_size) { 2320 if (READ_ONCE(po->copy_thresh) && 2321 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 2322 if (skb_shared(skb)) { 2323 copy_skb = skb_clone(skb, GFP_ATOMIC); 2324 } else { 2325 copy_skb = skb_get(skb); 2326 skb_head = skb->data; 2327 } 2328 if (copy_skb) { 2329 memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0, 2330 sizeof(PACKET_SKB_CB(copy_skb)->sa.ll)); 2331 skb_set_owner_r(copy_skb, sk); 2332 } 2333 } 2334 snaplen = po->rx_ring.frame_size - macoff; 2335 if ((int)snaplen < 0) { 2336 snaplen = 0; 2337 vnet_hdr_sz = 0; 2338 } 2339 } 2340 } else if (unlikely(macoff + snaplen > 2341 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { 2342 u32 nval; 2343 2344 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; 2345 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. 
macoff=%u\n", 2346 snaplen, nval, macoff); 2347 snaplen = nval; 2348 if (unlikely((int)snaplen < 0)) { 2349 snaplen = 0; 2350 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; 2351 vnet_hdr_sz = 0; 2352 } 2353 } 2354 spin_lock(&sk->sk_receive_queue.lock); 2355 h.raw = packet_current_rx_frame(po, skb, 2356 TP_STATUS_KERNEL, (macoff+snaplen)); 2357 if (!h.raw) 2358 goto drop_n_account; 2359 2360 if (po->tp_version <= TPACKET_V2) { 2361 slot_id = po->rx_ring.head; 2362 if (test_bit(slot_id, po->rx_ring.rx_owner_map)) 2363 goto drop_n_account; 2364 __set_bit(slot_id, po->rx_ring.rx_owner_map); 2365 } 2366 2367 if (vnet_hdr_sz && 2368 virtio_net_hdr_from_skb(skb, h.raw + macoff - 2369 sizeof(struct virtio_net_hdr), 2370 vio_le(), true, 0)) { 2371 if (po->tp_version == TPACKET_V3) 2372 prb_clear_blk_fill_status(&po->rx_ring); 2373 goto drop_n_account; 2374 } 2375 2376 if (po->tp_version <= TPACKET_V2) { 2377 packet_increment_rx_head(po, &po->rx_ring); 2378 /* 2379 * LOSING will be reported till you read the stats, 2380 * because it's COR - Clear On Read. 2381 * Anyways, moving it for V1/V2 only as V3 doesn't need this 2382 * at packet level. 2383 */ 2384 if (atomic_read(&po->tp_drops)) 2385 status |= TP_STATUS_LOSING; 2386 } 2387 2388 po->stats.stats1.tp_packets++; 2389 if (copy_skb) { 2390 status |= TP_STATUS_COPY; 2391 skb_clear_delivery_time(copy_skb); 2392 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 2393 } 2394 spin_unlock(&sk->sk_receive_queue.lock); 2395 2396 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 2397 2398 /* Always timestamp; prefer an existing software timestamp taken 2399 * closer to the time of capture. 2400 */ 2401 ts_status = tpacket_get_timestamp(skb, &ts, 2402 READ_ONCE(po->tp_tstamp) | 2403 SOF_TIMESTAMPING_SOFTWARE); 2404 if (!ts_status) 2405 ktime_get_real_ts64(&ts); 2406 2407 status |= ts_status; 2408 2409 switch (po->tp_version) { 2410 case TPACKET_V1: 2411 h.h1->tp_len = skb->len; 2412 h.h1->tp_snaplen = snaplen; 2413 h.h1->tp_mac = macoff; 2414 h.h1->tp_net = netoff; 2415 h.h1->tp_sec = ts.tv_sec; 2416 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 2417 hdrlen = sizeof(*h.h1); 2418 break; 2419 case TPACKET_V2: 2420 h.h2->tp_len = skb->len; 2421 h.h2->tp_snaplen = snaplen; 2422 h.h2->tp_mac = macoff; 2423 h.h2->tp_net = netoff; 2424 h.h2->tp_sec = ts.tv_sec; 2425 h.h2->tp_nsec = ts.tv_nsec; 2426 if (skb_vlan_tag_present(skb)) { 2427 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); 2428 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2429 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2430 } else { 2431 h.h2->tp_vlan_tci = 0; 2432 h.h2->tp_vlan_tpid = 0; 2433 } 2434 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); 2435 hdrlen = sizeof(*h.h2); 2436 break; 2437 case TPACKET_V3: 2438 /* tp_nxt_offset,vlan are already populated above. 
2439 * So DONT clear those fields here 2440 */ 2441 h.h3->tp_status |= status; 2442 h.h3->tp_len = skb->len; 2443 h.h3->tp_snaplen = snaplen; 2444 h.h3->tp_mac = macoff; 2445 h.h3->tp_net = netoff; 2446 h.h3->tp_sec = ts.tv_sec; 2447 h.h3->tp_nsec = ts.tv_nsec; 2448 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); 2449 hdrlen = sizeof(*h.h3); 2450 break; 2451 default: 2452 BUG(); 2453 } 2454 2455 sll = h.raw + TPACKET_ALIGN(hdrlen); 2456 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2457 sll->sll_family = AF_PACKET; 2458 sll->sll_hatype = dev->type; 2459 sll->sll_protocol = skb->protocol; 2460 sll->sll_pkttype = skb->pkt_type; 2461 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) 2462 sll->sll_ifindex = orig_dev->ifindex; 2463 else 2464 sll->sll_ifindex = dev->ifindex; 2465 2466 smp_mb(); 2467 2468 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 2469 if (po->tp_version <= TPACKET_V2) { 2470 u8 *start, *end; 2471 2472 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + 2473 macoff + snaplen); 2474 2475 for (start = h.raw; start < end; start += PAGE_SIZE) 2476 flush_dcache_page(pgv_to_page(start)); 2477 } 2478 smp_wmb(); 2479 #endif 2480 2481 if (po->tp_version <= TPACKET_V2) { 2482 spin_lock(&sk->sk_receive_queue.lock); 2483 __packet_set_status(po, h.raw, status); 2484 __clear_bit(slot_id, po->rx_ring.rx_owner_map); 2485 spin_unlock(&sk->sk_receive_queue.lock); 2486 sk->sk_data_ready(sk); 2487 } else if (po->tp_version == TPACKET_V3) { 2488 prb_clear_blk_fill_status(&po->rx_ring); 2489 } 2490 2491 drop_n_restore: 2492 if (skb_head != skb->data && skb_shared(skb)) { 2493 skb->data = skb_head; 2494 skb->len = skb_len; 2495 } 2496 drop: 2497 sk_skb_reason_drop(sk, skb, drop_reason); 2498 return 0; 2499 2500 drop_n_account: 2501 spin_unlock(&sk->sk_receive_queue.lock); 2502 atomic_inc(&po->tp_drops); 2503 drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; 2504 2505 sk->sk_data_ready(sk); 2506 sk_skb_reason_drop(sk, copy_skb, drop_reason); 2507 goto drop_n_restore; 2508 } 2509 2510 static void tpacket_destruct_skb(struct sk_buff *skb) 2511 { 2512 struct packet_sock *po = pkt_sk(skb->sk); 2513 2514 if (likely(po->tx_ring.pg_vec)) { 2515 void *ph; 2516 __u32 ts; 2517 2518 ph = skb_zcopy_get_nouarg(skb); 2519 packet_dec_pending(&po->tx_ring); 2520 2521 ts = __packet_set_timestamp(po, ph, skb); 2522 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2523 2524 complete(&po->skb_completion); 2525 } 2526 2527 sock_wfree(skb); 2528 } 2529 2530 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) 2531 { 2532 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2533 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2534 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > 2535 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) 2536 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), 2537 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2538 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); 2539 2540 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) 2541 return -EINVAL; 2542 2543 return 0; 2544 } 2545 2546 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, 2547 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz) 2548 { 2549 int ret; 2550 2551 if (*len < vnet_hdr_sz) 2552 return -EINVAL; 2553 *len -= vnet_hdr_sz; 2554 2555 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) 2556 return -EFAULT; 2557 2558 ret = __packet_snd_vnet_parse(vnet_hdr, *len); 2559 if (ret) 2560 return ret; 2561 2562 /* move iter to point 
to the start of mac header */ 2563 if (vnet_hdr_sz != sizeof(struct virtio_net_hdr)) 2564 iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr)); 2565 2566 return 0; 2567 } 2568 2569 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2570 void *frame, struct net_device *dev, void *data, int tp_len, 2571 __be16 proto, unsigned char *addr, int hlen, int copylen, 2572 const struct sockcm_cookie *sockc) 2573 { 2574 union tpacket_uhdr ph; 2575 int to_write, offset, len, nr_frags, len_max; 2576 struct socket *sock = po->sk.sk_socket; 2577 struct page *page; 2578 int err; 2579 2580 ph.raw = frame; 2581 2582 skb->protocol = proto; 2583 skb->dev = dev; 2584 skb->priority = READ_ONCE(po->sk.sk_priority); 2585 skb->mark = READ_ONCE(po->sk.sk_mark); 2586 skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid); 2587 skb_setup_tx_timestamp(skb, sockc->tsflags); 2588 skb_zcopy_set_nouarg(skb, ph.raw); 2589 2590 skb_reserve(skb, hlen); 2591 skb_reset_network_header(skb); 2592 2593 to_write = tp_len; 2594 2595 if (sock->type == SOCK_DGRAM) { 2596 err = dev_hard_header(skb, dev, ntohs(proto), addr, 2597 NULL, tp_len); 2598 if (unlikely(err < 0)) 2599 return -EINVAL; 2600 } else if (copylen) { 2601 int hdrlen = min_t(int, copylen, tp_len); 2602 2603 skb_push(skb, dev->hard_header_len); 2604 skb_put(skb, copylen - dev->hard_header_len); 2605 err = skb_store_bits(skb, 0, data, hdrlen); 2606 if (unlikely(err)) 2607 return err; 2608 if (!dev_validate_header(dev, skb->data, hdrlen)) 2609 return -EINVAL; 2610 2611 data += hdrlen; 2612 to_write -= hdrlen; 2613 } 2614 2615 offset = offset_in_page(data); 2616 len_max = PAGE_SIZE - offset; 2617 len = ((to_write > len_max) ? len_max : to_write); 2618 2619 skb->data_len = to_write; 2620 skb->len += to_write; 2621 skb->truesize += to_write; 2622 refcount_add(to_write, &po->sk.sk_wmem_alloc); 2623 2624 while (likely(to_write)) { 2625 nr_frags = skb_shinfo(skb)->nr_frags; 2626 2627 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2628 pr_err("Packet exceed the number of skb frags(%u)\n", 2629 (unsigned int)MAX_SKB_FRAGS); 2630 return -EFAULT; 2631 } 2632 2633 page = pgv_to_page(data); 2634 data += len; 2635 flush_dcache_page(page); 2636 get_page(page); 2637 skb_fill_page_desc(skb, nr_frags, page, offset, len); 2638 to_write -= len; 2639 offset = 0; 2640 len_max = PAGE_SIZE; 2641 len = ((to_write > len_max) ? 
len_max : to_write); 2642 } 2643 2644 packet_parse_headers(skb, sock); 2645 2646 return tp_len; 2647 } 2648 2649 static int tpacket_parse_header(struct packet_sock *po, void *frame, 2650 int size_max, void **data) 2651 { 2652 union tpacket_uhdr ph; 2653 int tp_len, off; 2654 2655 ph.raw = frame; 2656 2657 switch (po->tp_version) { 2658 case TPACKET_V3: 2659 if (ph.h3->tp_next_offset != 0) { 2660 pr_warn_once("variable sized slot not supported"); 2661 return -EINVAL; 2662 } 2663 tp_len = ph.h3->tp_len; 2664 break; 2665 case TPACKET_V2: 2666 tp_len = ph.h2->tp_len; 2667 break; 2668 default: 2669 tp_len = ph.h1->tp_len; 2670 break; 2671 } 2672 if (unlikely(tp_len > size_max)) { 2673 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); 2674 return -EMSGSIZE; 2675 } 2676 2677 if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) { 2678 int off_min, off_max; 2679 2680 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2681 off_max = po->tx_ring.frame_size - tp_len; 2682 if (po->sk.sk_type == SOCK_DGRAM) { 2683 switch (po->tp_version) { 2684 case TPACKET_V3: 2685 off = ph.h3->tp_net; 2686 break; 2687 case TPACKET_V2: 2688 off = ph.h2->tp_net; 2689 break; 2690 default: 2691 off = ph.h1->tp_net; 2692 break; 2693 } 2694 } else { 2695 switch (po->tp_version) { 2696 case TPACKET_V3: 2697 off = ph.h3->tp_mac; 2698 break; 2699 case TPACKET_V2: 2700 off = ph.h2->tp_mac; 2701 break; 2702 default: 2703 off = ph.h1->tp_mac; 2704 break; 2705 } 2706 } 2707 if (unlikely((off < off_min) || (off_max < off))) 2708 return -EINVAL; 2709 } else { 2710 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2711 } 2712 2713 *data = frame + off; 2714 return tp_len; 2715 } 2716 2717 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2718 { 2719 struct sk_buff *skb = NULL; 2720 struct net_device *dev; 2721 struct virtio_net_hdr *vnet_hdr = NULL; 2722 struct sockcm_cookie sockc; 2723 __be16 proto; 2724 int err, reserve = 0; 2725 void *ph; 2726 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2727 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2728 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); 2729 unsigned char *addr = NULL; 2730 int tp_len, size_max; 2731 void *data; 2732 int len_sum = 0; 2733 int status = TP_STATUS_AVAILABLE; 2734 int hlen, tlen, copylen = 0; 2735 long timeo = 0; 2736 2737 mutex_lock(&po->pg_vec_lock); 2738 2739 /* packet_sendmsg() check on tx_ring.pg_vec was lockless, 2740 * we need to confirm it under protection of pg_vec_lock. 
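 *
 * The loop below consumes TX ring frames whose status is
 * TP_STATUS_SEND_REQUEST.  Rough sketch of the user-space side
 * that feeds it, for a TPACKET_V2 ring without PACKET_TX_HAS_OFF;
 * "ring", "i", "framesiz", "pkt", "pkt_len" and "fd" are
 * placeholders, and a real producer also needs a memory barrier
 * before the final status store:
 *
 *	char *frame = ring + i * framesiz;
 *	struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)frame;
 *	void *data = frame + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
 *
 *	memcpy(data, pkt, pkt_len);
 *	hdr->tp_len = pkt_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);
 *
 * The data offset matches the tp_hdrlen - sizeof(struct sockaddr_ll)
 * computation in tpacket_parse_header() above.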
2741 */ 2742 if (unlikely(!po->tx_ring.pg_vec)) { 2743 err = -EBUSY; 2744 goto out; 2745 } 2746 if (likely(saddr == NULL)) { 2747 dev = packet_cached_dev_get(po); 2748 proto = READ_ONCE(po->num); 2749 } else { 2750 err = -EINVAL; 2751 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2752 goto out; 2753 if (msg->msg_namelen < (saddr->sll_halen 2754 + offsetof(struct sockaddr_ll, 2755 sll_addr))) 2756 goto out; 2757 proto = saddr->sll_protocol; 2758 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2759 if (po->sk.sk_socket->type == SOCK_DGRAM) { 2760 if (dev && msg->msg_namelen < dev->addr_len + 2761 offsetof(struct sockaddr_ll, sll_addr)) 2762 goto out_put; 2763 addr = saddr->sll_addr; 2764 } 2765 } 2766 2767 err = -ENXIO; 2768 if (unlikely(dev == NULL)) 2769 goto out; 2770 err = -ENETDOWN; 2771 if (unlikely(!(dev->flags & IFF_UP))) 2772 goto out_put; 2773 2774 sockcm_init(&sockc, &po->sk); 2775 if (msg->msg_controllen) { 2776 err = sock_cmsg_send(&po->sk, msg, &sockc); 2777 if (unlikely(err)) 2778 goto out_put; 2779 } 2780 2781 if (po->sk.sk_socket->type == SOCK_RAW) 2782 reserve = dev->hard_header_len; 2783 size_max = po->tx_ring.frame_size 2784 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2785 2786 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz) 2787 size_max = dev->mtu + reserve + VLAN_HLEN; 2788 2789 reinit_completion(&po->skb_completion); 2790 2791 do { 2792 ph = packet_current_frame(po, &po->tx_ring, 2793 TP_STATUS_SEND_REQUEST); 2794 if (unlikely(ph == NULL)) { 2795 if (need_wait && skb) { 2796 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT); 2797 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo); 2798 if (timeo <= 0) { 2799 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS; 2800 goto out_put; 2801 } 2802 } 2803 /* check for additional frames */ 2804 continue; 2805 } 2806 2807 skb = NULL; 2808 tp_len = tpacket_parse_header(po, ph, size_max, &data); 2809 if (tp_len < 0) 2810 goto tpacket_error; 2811 2812 status = TP_STATUS_SEND_REQUEST; 2813 hlen = LL_RESERVED_SPACE(dev); 2814 tlen = dev->needed_tailroom; 2815 if (vnet_hdr_sz) { 2816 vnet_hdr = data; 2817 data += vnet_hdr_sz; 2818 tp_len -= vnet_hdr_sz; 2819 if (tp_len < 0 || 2820 __packet_snd_vnet_parse(vnet_hdr, tp_len)) { 2821 tp_len = -EINVAL; 2822 goto tpacket_error; 2823 } 2824 copylen = __virtio16_to_cpu(vio_le(), 2825 vnet_hdr->hdr_len); 2826 } 2827 copylen = max_t(int, copylen, dev->hard_header_len); 2828 skb = sock_alloc_send_skb(&po->sk, 2829 hlen + tlen + sizeof(struct sockaddr_ll) + 2830 (copylen - dev->hard_header_len), 2831 !need_wait, &err); 2832 2833 if (unlikely(skb == NULL)) { 2834 /* we assume the socket was initially writeable ... 
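 * i.e. if at least one frame has already been handed to the
 * driver, report the byte count sent so far instead of the
 * allocation error, matching normal sendmsg() partial-send
 * behaviour.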
*/ 2835 if (likely(len_sum > 0)) 2836 err = len_sum; 2837 goto out_status; 2838 } 2839 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, 2840 addr, hlen, copylen, &sockc); 2841 if (likely(tp_len >= 0) && 2842 tp_len > dev->mtu + reserve && 2843 !vnet_hdr_sz && 2844 !packet_extra_vlan_len_allowed(dev, skb)) 2845 tp_len = -EMSGSIZE; 2846 2847 if (unlikely(tp_len < 0)) { 2848 tpacket_error: 2849 if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) { 2850 __packet_set_status(po, ph, 2851 TP_STATUS_AVAILABLE); 2852 packet_increment_head(&po->tx_ring); 2853 kfree_skb(skb); 2854 continue; 2855 } else { 2856 status = TP_STATUS_WRONG_FORMAT; 2857 err = tp_len; 2858 goto out_status; 2859 } 2860 } 2861 2862 if (vnet_hdr_sz) { 2863 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { 2864 tp_len = -EINVAL; 2865 goto tpacket_error; 2866 } 2867 virtio_net_hdr_set_proto(skb, vnet_hdr); 2868 } 2869 2870 skb->destructor = tpacket_destruct_skb; 2871 __packet_set_status(po, ph, TP_STATUS_SENDING); 2872 packet_inc_pending(&po->tx_ring); 2873 2874 status = TP_STATUS_SEND_REQUEST; 2875 err = packet_xmit(po, skb); 2876 if (unlikely(err != 0)) { 2877 if (err > 0) 2878 err = net_xmit_errno(err); 2879 if (err && __packet_get_status(po, ph) == 2880 TP_STATUS_AVAILABLE) { 2881 /* skb was destructed already */ 2882 skb = NULL; 2883 goto out_status; 2884 } 2885 /* 2886 * skb was dropped but not destructed yet; 2887 * let's treat it like congestion or err < 0 2888 */ 2889 err = 0; 2890 } 2891 packet_increment_head(&po->tx_ring); 2892 len_sum += tp_len; 2893 } while (likely((ph != NULL) || 2894 /* Note: packet_read_pending() might be slow if we have 2895 * to call it as it's per_cpu variable, but in fast-path 2896 * we already short-circuit the loop with the first 2897 * condition, and luckily don't have to go that path 2898 * anyway. 2899 */ 2900 (need_wait && packet_read_pending(&po->tx_ring)))); 2901 2902 err = len_sum; 2903 goto out_put; 2904 2905 out_status: 2906 __packet_set_status(po, ph, status); 2907 kfree_skb(skb); 2908 out_put: 2909 dev_put(dev); 2910 out: 2911 mutex_unlock(&po->pg_vec_lock); 2912 return err; 2913 } 2914 2915 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2916 size_t reserve, size_t len, 2917 size_t linear, int noblock, 2918 int *err) 2919 { 2920 struct sk_buff *skb; 2921 2922 /* Under a page? Don't bother with paged skb. */ 2923 if (prepad + len < PAGE_SIZE || !linear) 2924 linear = len; 2925 2926 if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) 2927 linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); 2928 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 2929 err, PAGE_ALLOC_COSTLY_ORDER); 2930 if (!skb) 2931 return NULL; 2932 2933 skb_reserve(skb, reserve); 2934 skb_put(skb, linear); 2935 skb->data_len = len - linear; 2936 skb->len += len - linear; 2937 2938 return skb; 2939 } 2940 2941 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) 2942 { 2943 struct sock *sk = sock->sk; 2944 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2945 struct sk_buff *skb; 2946 struct net_device *dev; 2947 __be16 proto; 2948 unsigned char *addr = NULL; 2949 int err, reserve = 0; 2950 struct sockcm_cookie sockc; 2951 struct virtio_net_hdr vnet_hdr = { 0 }; 2952 int offset = 0; 2953 struct packet_sock *po = pkt_sk(sk); 2954 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); 2955 int hlen, tlen, linear; 2956 int extra_len = 0; 2957 2958 /* 2959 * Get and verify the address. 
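 *
 * For a SOCK_DGRAM packet socket the destination arrives as a
 * struct sockaddr_ll.  Illustrative user-space fill-in ("fd",
 * "dst_mac", "payload", "payload_len" and the device name are
 * placeholders):
 *
 *	struct sockaddr_ll sll = { 0 };
 *
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_IP);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	sll.sll_halen    = ETH_ALEN;
 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));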
2960 */ 2961 2962 if (likely(saddr == NULL)) { 2963 dev = packet_cached_dev_get(po); 2964 proto = READ_ONCE(po->num); 2965 } else { 2966 err = -EINVAL; 2967 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2968 goto out; 2969 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2970 goto out; 2971 proto = saddr->sll_protocol; 2972 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2973 if (sock->type == SOCK_DGRAM) { 2974 if (dev && msg->msg_namelen < dev->addr_len + 2975 offsetof(struct sockaddr_ll, sll_addr)) 2976 goto out_unlock; 2977 addr = saddr->sll_addr; 2978 } 2979 } 2980 2981 err = -ENXIO; 2982 if (unlikely(dev == NULL)) 2983 goto out_unlock; 2984 err = -ENETDOWN; 2985 if (unlikely(!(dev->flags & IFF_UP))) 2986 goto out_unlock; 2987 2988 sockcm_init(&sockc, sk); 2989 sockc.mark = READ_ONCE(sk->sk_mark); 2990 if (msg->msg_controllen) { 2991 err = sock_cmsg_send(sk, msg, &sockc); 2992 if (unlikely(err)) 2993 goto out_unlock; 2994 } 2995 2996 if (sock->type == SOCK_RAW) 2997 reserve = dev->hard_header_len; 2998 if (vnet_hdr_sz) { 2999 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz); 3000 if (err) 3001 goto out_unlock; 3002 } 3003 3004 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 3005 if (!netif_supports_nofcs(dev)) { 3006 err = -EPROTONOSUPPORT; 3007 goto out_unlock; 3008 } 3009 extra_len = 4; /* We're doing our own CRC */ 3010 } 3011 3012 err = -EMSGSIZE; 3013 if (!vnet_hdr.gso_type && 3014 (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) 3015 goto out_unlock; 3016 3017 err = -ENOBUFS; 3018 hlen = LL_RESERVED_SPACE(dev); 3019 tlen = dev->needed_tailroom; 3020 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 3021 linear = max(linear, min_t(int, len, dev->hard_header_len)); 3022 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, 3023 msg->msg_flags & MSG_DONTWAIT, &err); 3024 if (skb == NULL) 3025 goto out_unlock; 3026 3027 skb_reset_network_header(skb); 3028 3029 err = -EINVAL; 3030 if (sock->type == SOCK_DGRAM) { 3031 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 3032 if (unlikely(offset < 0)) 3033 goto out_free; 3034 } else if (reserve) { 3035 skb_reserve(skb, -reserve); 3036 if (len < reserve + sizeof(struct ipv6hdr) && 3037 dev->min_header_len != dev->hard_header_len) 3038 skb_reset_network_header(skb); 3039 } 3040 3041 /* Returns -EFAULT on error */ 3042 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); 3043 if (err) 3044 goto out_free; 3045 3046 if ((sock->type == SOCK_RAW && 3047 !dev_validate_header(dev, skb->data, len)) || !skb->len) { 3048 err = -EINVAL; 3049 goto out_free; 3050 } 3051 3052 skb_setup_tx_timestamp(skb, sockc.tsflags); 3053 3054 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && 3055 !packet_extra_vlan_len_allowed(dev, skb)) { 3056 err = -EMSGSIZE; 3057 goto out_free; 3058 } 3059 3060 skb->protocol = proto; 3061 skb->dev = dev; 3062 skb->priority = READ_ONCE(sk->sk_priority); 3063 skb->mark = sockc.mark; 3064 skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid); 3065 3066 if (unlikely(extra_len == 4)) 3067 skb->no_fcs = 1; 3068 3069 packet_parse_headers(skb, sock); 3070 3071 if (vnet_hdr_sz) { 3072 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); 3073 if (err) 3074 goto out_free; 3075 len += vnet_hdr_sz; 3076 virtio_net_hdr_set_proto(skb, &vnet_hdr); 3077 } 3078 3079 err = packet_xmit(po, skb); 3080 3081 if (unlikely(err != 0)) { 3082 if (err > 0) 3083 err = net_xmit_errno(err); 3084 if (err) 3085 goto 
out_unlock; 3086 } 3087 3088 dev_put(dev); 3089 3090 return len; 3091 3092 out_free: 3093 kfree_skb(skb); 3094 out_unlock: 3095 dev_put(dev); 3096 out: 3097 return err; 3098 } 3099 3100 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 3101 { 3102 struct sock *sk = sock->sk; 3103 struct packet_sock *po = pkt_sk(sk); 3104 3105 /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy. 3106 * tpacket_snd() will redo the check safely. 3107 */ 3108 if (data_race(po->tx_ring.pg_vec)) 3109 return tpacket_snd(po, msg); 3110 3111 return packet_snd(sock, msg, len); 3112 } 3113 3114 /* 3115 * Close a PACKET socket. This is fairly simple. We immediately go 3116 * to 'closed' state and remove our protocol entry in the device list. 3117 */ 3118 3119 static int packet_release(struct socket *sock) 3120 { 3121 struct sock *sk = sock->sk; 3122 struct packet_sock *po; 3123 struct packet_fanout *f; 3124 struct net *net; 3125 union tpacket_req_u req_u; 3126 3127 if (!sk) 3128 return 0; 3129 3130 net = sock_net(sk); 3131 po = pkt_sk(sk); 3132 3133 mutex_lock(&net->packet.sklist_lock); 3134 sk_del_node_init_rcu(sk); 3135 mutex_unlock(&net->packet.sklist_lock); 3136 3137 sock_prot_inuse_add(net, sk->sk_prot, -1); 3138 3139 spin_lock(&po->bind_lock); 3140 unregister_prot_hook(sk, false); 3141 packet_cached_dev_reset(po); 3142 3143 if (po->prot_hook.dev) { 3144 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker); 3145 po->prot_hook.dev = NULL; 3146 } 3147 spin_unlock(&po->bind_lock); 3148 3149 packet_flush_mclist(sk); 3150 3151 lock_sock(sk); 3152 if (po->rx_ring.pg_vec) { 3153 memset(&req_u, 0, sizeof(req_u)); 3154 packet_set_ring(sk, &req_u, 1, 0); 3155 } 3156 3157 if (po->tx_ring.pg_vec) { 3158 memset(&req_u, 0, sizeof(req_u)); 3159 packet_set_ring(sk, &req_u, 1, 1); 3160 } 3161 release_sock(sk); 3162 3163 f = fanout_release(sk); 3164 3165 synchronize_net(); 3166 3167 kfree(po->rollover); 3168 if (f) { 3169 fanout_release_data(f); 3170 kvfree(f); 3171 } 3172 /* 3173 * Now the socket is dead. No more input will appear. 3174 */ 3175 sock_orphan(sk); 3176 sock->sk = NULL; 3177 3178 /* Purge queues */ 3179 3180 skb_queue_purge(&sk->sk_receive_queue); 3181 packet_free_pending(po); 3182 3183 sock_put(sk); 3184 return 0; 3185 } 3186 3187 /* 3188 * Attach a packet hook. 
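 *
 * packet_do_bind() is reached from both bind() flavours below.
 * Illustrative user-space call that ends up here ("fd" and the
 * device name are placeholders):
 *
 *	struct sockaddr_ll sll = { 0 };
 *
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));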
3189 */ 3190 3191 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, 3192 __be16 proto) 3193 { 3194 struct packet_sock *po = pkt_sk(sk); 3195 struct net_device *dev = NULL; 3196 bool unlisted = false; 3197 bool need_rehook; 3198 int ret = 0; 3199 3200 lock_sock(sk); 3201 spin_lock(&po->bind_lock); 3202 if (!proto) 3203 proto = po->num; 3204 3205 rcu_read_lock(); 3206 3207 if (po->fanout) { 3208 ret = -EINVAL; 3209 goto out_unlock; 3210 } 3211 3212 if (name) { 3213 dev = dev_get_by_name_rcu(sock_net(sk), name); 3214 if (!dev) { 3215 ret = -ENODEV; 3216 goto out_unlock; 3217 } 3218 } else if (ifindex) { 3219 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3220 if (!dev) { 3221 ret = -ENODEV; 3222 goto out_unlock; 3223 } 3224 } 3225 3226 need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev; 3227 3228 if (need_rehook) { 3229 dev_hold(dev); 3230 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) { 3231 rcu_read_unlock(); 3232 /* prevents packet_notifier() from calling 3233 * register_prot_hook() 3234 */ 3235 WRITE_ONCE(po->num, 0); 3236 __unregister_prot_hook(sk, true); 3237 rcu_read_lock(); 3238 if (dev) 3239 unlisted = !dev_get_by_index_rcu(sock_net(sk), 3240 dev->ifindex); 3241 } 3242 3243 BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING)); 3244 WRITE_ONCE(po->num, proto); 3245 po->prot_hook.type = proto; 3246 3247 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker); 3248 3249 if (unlikely(unlisted)) { 3250 po->prot_hook.dev = NULL; 3251 WRITE_ONCE(po->ifindex, -1); 3252 packet_cached_dev_reset(po); 3253 } else { 3254 netdev_hold(dev, &po->prot_hook.dev_tracker, 3255 GFP_ATOMIC); 3256 po->prot_hook.dev = dev; 3257 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); 3258 packet_cached_dev_assign(po, dev); 3259 } 3260 dev_put(dev); 3261 } 3262 3263 if (proto == 0 || !need_rehook) 3264 goto out_unlock; 3265 3266 if (!unlisted && (!dev || (dev->flags & IFF_UP))) { 3267 register_prot_hook(sk); 3268 } else { 3269 sk->sk_err = ENETDOWN; 3270 if (!sock_flag(sk, SOCK_DEAD)) 3271 sk_error_report(sk); 3272 } 3273 3274 out_unlock: 3275 rcu_read_unlock(); 3276 spin_unlock(&po->bind_lock); 3277 release_sock(sk); 3278 return ret; 3279 } 3280 3281 /* 3282 * Bind a packet socket to a device 3283 */ 3284 3285 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, 3286 int addr_len) 3287 { 3288 struct sock *sk = sock->sk; 3289 char name[sizeof(uaddr->sa_data_min) + 1]; 3290 3291 /* 3292 * Check legality 3293 */ 3294 3295 if (addr_len != sizeof(struct sockaddr)) 3296 return -EINVAL; 3297 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be 3298 * zero-terminated. 3299 */ 3300 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min)); 3301 name[sizeof(uaddr->sa_data_min)] = 0; 3302 3303 return packet_do_bind(sk, name, 0, 0); 3304 } 3305 3306 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3307 { 3308 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 3309 struct sock *sk = sock->sk; 3310 3311 /* 3312 * Check legality 3313 */ 3314 3315 if (addr_len < sizeof(struct sockaddr_ll)) 3316 return -EINVAL; 3317 if (sll->sll_family != AF_PACKET) 3318 return -EINVAL; 3319 3320 return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol); 3321 } 3322 3323 static struct proto packet_proto = { 3324 .name = "PACKET", 3325 .owner = THIS_MODULE, 3326 .obj_size = sizeof(struct packet_sock), 3327 }; 3328 3329 /* 3330 * Create a packet of type SOCK_PACKET. 
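 *
 * (Despite the historical wording above, this also creates
 * SOCK_RAW and SOCK_DGRAM packet sockets.)  Creation requires
 * CAP_NET_RAW in the socket's user namespace; illustrative
 * user-space call:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));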
3331 */ 3332 3333 static int packet_create(struct net *net, struct socket *sock, int protocol, 3334 int kern) 3335 { 3336 struct sock *sk; 3337 struct packet_sock *po; 3338 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 3339 int err; 3340 3341 if (!ns_capable(net->user_ns, CAP_NET_RAW)) 3342 return -EPERM; 3343 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 3344 sock->type != SOCK_PACKET) 3345 return -ESOCKTNOSUPPORT; 3346 3347 sock->state = SS_UNCONNECTED; 3348 3349 err = -ENOBUFS; 3350 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); 3351 if (sk == NULL) 3352 goto out; 3353 3354 sock->ops = &packet_ops; 3355 if (sock->type == SOCK_PACKET) 3356 sock->ops = &packet_ops_spkt; 3357 3358 sock_init_data(sock, sk); 3359 3360 po = pkt_sk(sk); 3361 init_completion(&po->skb_completion); 3362 sk->sk_family = PF_PACKET; 3363 po->num = proto; 3364 3365 err = packet_alloc_pending(po); 3366 if (err) 3367 goto out2; 3368 3369 packet_cached_dev_reset(po); 3370 3371 sk->sk_destruct = packet_sock_destruct; 3372 3373 /* 3374 * Attach a protocol block 3375 */ 3376 3377 spin_lock_init(&po->bind_lock); 3378 mutex_init(&po->pg_vec_lock); 3379 po->rollover = NULL; 3380 po->prot_hook.func = packet_rcv; 3381 3382 if (sock->type == SOCK_PACKET) 3383 po->prot_hook.func = packet_rcv_spkt; 3384 3385 po->prot_hook.af_packet_priv = sk; 3386 po->prot_hook.af_packet_net = sock_net(sk); 3387 3388 if (proto) { 3389 po->prot_hook.type = proto; 3390 __register_prot_hook(sk); 3391 } 3392 3393 mutex_lock(&net->packet.sklist_lock); 3394 sk_add_node_tail_rcu(sk, &net->packet.sklist); 3395 mutex_unlock(&net->packet.sklist_lock); 3396 3397 sock_prot_inuse_add(net, &packet_proto, 1); 3398 3399 return 0; 3400 out2: 3401 sk_free(sk); 3402 out: 3403 return err; 3404 } 3405 3406 /* 3407 * Pull a packet from our receive queue and hand it to the user. 3408 * If necessary we block. 3409 */ 3410 3411 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 3412 int flags) 3413 { 3414 struct sock *sk = sock->sk; 3415 struct sk_buff *skb; 3416 int copied, err; 3417 int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz); 3418 unsigned int origlen = 0; 3419 3420 err = -EINVAL; 3421 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) 3422 goto out; 3423 3424 #if 0 3425 /* What error should we return now? EUNATTACH? */ 3426 if (pkt_sk(sk)->ifindex < 0) 3427 return -ENODEV; 3428 #endif 3429 3430 if (flags & MSG_ERRQUEUE) { 3431 err = sock_recv_errqueue(sk, msg, len, 3432 SOL_PACKET, PACKET_TX_TIMESTAMP); 3433 goto out; 3434 } 3435 3436 /* 3437 * Call the generic datagram receiver. This handles all sorts 3438 * of horrible races and re-entrancy so we can forget about it 3439 * in the protocol layers. 3440 * 3441 * Now it will return ENETDOWN, if device have just gone down, 3442 * but then it will block. 3443 */ 3444 3445 skb = skb_recv_datagram(sk, flags, &err); 3446 3447 /* 3448 * An error occurred so return it. Because skb_recv_datagram() 3449 * handles the blocking we don't see and worry about blocking 3450 * retries. 3451 */ 3452 3453 if (skb == NULL) 3454 goto out; 3455 3456 packet_rcv_try_clear_pressure(pkt_sk(sk)); 3457 3458 if (vnet_hdr_len) { 3459 err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len); 3460 if (err) 3461 goto out_free; 3462 } 3463 3464 /* You lose any data beyond the buffer you gave. If it worries 3465 * a user program they can ask the device for its MTU 3466 * anyway. 
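 *
 * With MSG_TRUNC the call still copies at most the buffer size
 * but reports the full length of the queued frame, so a reader
 * can detect truncation.  Illustrative sketch ("fd" and "buf"
 * are placeholders): if
 *
 *	ssize_t full = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *
 * returns a value larger than sizeof(buf), the frame was longer
 * than the supplied buffer.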
3467 */ 3468 copied = skb->len; 3469 if (copied > len) { 3470 copied = len; 3471 msg->msg_flags |= MSG_TRUNC; 3472 } 3473 3474 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3475 if (err) 3476 goto out_free; 3477 3478 if (sock->type != SOCK_PACKET) { 3479 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3480 3481 /* Original length was stored in sockaddr_ll fields */ 3482 origlen = PACKET_SKB_CB(skb)->sa.origlen; 3483 sll->sll_family = AF_PACKET; 3484 sll->sll_protocol = skb->protocol; 3485 } 3486 3487 sock_recv_cmsgs(msg, sk, skb); 3488 3489 if (msg->msg_name) { 3490 const size_t max_len = min(sizeof(skb->cb), 3491 sizeof(struct sockaddr_storage)); 3492 int copy_len; 3493 3494 /* If the address length field is there to be filled 3495 * in, we fill it in now. 3496 */ 3497 if (sock->type == SOCK_PACKET) { 3498 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 3499 msg->msg_namelen = sizeof(struct sockaddr_pkt); 3500 copy_len = msg->msg_namelen; 3501 } else { 3502 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3503 3504 msg->msg_namelen = sll->sll_halen + 3505 offsetof(struct sockaddr_ll, sll_addr); 3506 copy_len = msg->msg_namelen; 3507 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) { 3508 memset(msg->msg_name + 3509 offsetof(struct sockaddr_ll, sll_addr), 3510 0, sizeof(sll->sll_addr)); 3511 msg->msg_namelen = sizeof(struct sockaddr_ll); 3512 } 3513 } 3514 if (WARN_ON_ONCE(copy_len > max_len)) { 3515 copy_len = max_len; 3516 msg->msg_namelen = copy_len; 3517 } 3518 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); 3519 } 3520 3521 if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) { 3522 struct tpacket_auxdata aux; 3523 3524 aux.tp_status = TP_STATUS_USER; 3525 if (skb->ip_summed == CHECKSUM_PARTIAL) 3526 aux.tp_status |= TP_STATUS_CSUMNOTREADY; 3527 else if (skb->pkt_type != PACKET_OUTGOING && 3528 skb_csum_unnecessary(skb)) 3529 aux.tp_status |= TP_STATUS_CSUM_VALID; 3530 if (skb_is_gso(skb) && skb_is_gso_tcp(skb)) 3531 aux.tp_status |= TP_STATUS_GSO_TCP; 3532 3533 aux.tp_len = origlen; 3534 aux.tp_snaplen = skb->len; 3535 aux.tp_mac = 0; 3536 aux.tp_net = skb_network_offset(skb); 3537 if (skb_vlan_tag_present(skb)) { 3538 aux.tp_vlan_tci = skb_vlan_tag_get(skb); 3539 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3540 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3541 } else { 3542 aux.tp_vlan_tci = 0; 3543 aux.tp_vlan_tpid = 0; 3544 } 3545 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 3546 } 3547 3548 /* 3549 * Free or return the buffer as appropriate. Again this 3550 * hides all the races and re-entrancy issues from us. 3551 */ 3552 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); 3553 3554 out_free: 3555 skb_free_datagram(sk, skb); 3556 out: 3557 return err; 3558 } 3559 3560 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, 3561 int peer) 3562 { 3563 struct net_device *dev; 3564 struct sock *sk = sock->sk; 3565 3566 if (peer) 3567 return -EOPNOTSUPP; 3568 3569 uaddr->sa_family = AF_PACKET; 3570 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min)); 3571 rcu_read_lock(); 3572 dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); 3573 if (dev) 3574 strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min)); 3575 rcu_read_unlock(); 3576 3577 return sizeof(*uaddr); 3578 } 3579 3580 static int packet_getname(struct socket *sock, struct sockaddr *uaddr, 3581 int peer) 3582 { 3583 struct net_device *dev; 3584 struct sock *sk = sock->sk; 3585 struct packet_sock *po = pkt_sk(sk); 3586 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); 3587 int ifindex; 3588 3589 if (peer) 3590 return -EOPNOTSUPP; 3591 3592 ifindex = READ_ONCE(po->ifindex); 3593 sll->sll_family = AF_PACKET; 3594 sll->sll_ifindex = ifindex; 3595 sll->sll_protocol = READ_ONCE(po->num); 3596 sll->sll_pkttype = 0; 3597 rcu_read_lock(); 3598 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3599 if (dev) { 3600 sll->sll_hatype = dev->type; 3601 sll->sll_halen = dev->addr_len; 3602 3603 /* Let __fortify_memcpy_chk() know the actual buffer size. */ 3604 memcpy(((struct sockaddr_storage *)sll)->__data + 3605 offsetof(struct sockaddr_ll, sll_addr) - 3606 offsetofend(struct sockaddr_ll, sll_family), 3607 dev->dev_addr, dev->addr_len); 3608 } else { 3609 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3610 sll->sll_halen = 0; 3611 } 3612 rcu_read_unlock(); 3613 3614 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 3615 } 3616 3617 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, 3618 int what) 3619 { 3620 switch (i->type) { 3621 case PACKET_MR_MULTICAST: 3622 if (i->alen != dev->addr_len) 3623 return -EINVAL; 3624 if (what > 0) 3625 return dev_mc_add(dev, i->addr); 3626 else 3627 return dev_mc_del(dev, i->addr); 3628 break; 3629 case PACKET_MR_PROMISC: 3630 return dev_set_promiscuity(dev, what); 3631 case PACKET_MR_ALLMULTI: 3632 return dev_set_allmulti(dev, what); 3633 case PACKET_MR_UNICAST: 3634 if (i->alen != dev->addr_len) 3635 return -EINVAL; 3636 if (what > 0) 3637 return dev_uc_add(dev, i->addr); 3638 else 3639 return dev_uc_del(dev, i->addr); 3640 break; 3641 default: 3642 break; 3643 } 3644 return 0; 3645 } 3646 3647 static void packet_dev_mclist_delete(struct net_device *dev, 3648 struct packet_mclist **mlp) 3649 { 3650 struct packet_mclist *ml; 3651 3652 while ((ml = *mlp) != NULL) { 3653 if (ml->ifindex == dev->ifindex) { 3654 packet_dev_mc(dev, ml, -1); 3655 *mlp = ml->next; 3656 kfree(ml); 3657 } else 3658 mlp = &ml->next; 3659 } 3660 } 3661 3662 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) 3663 { 3664 struct packet_sock *po = pkt_sk(sk); 3665 struct packet_mclist *ml, *i; 3666 struct net_device *dev; 3667 int err; 3668 3669 rtnl_lock(); 3670 3671 err = -ENODEV; 3672 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); 3673 if (!dev) 3674 goto done; 3675 3676 err = -EINVAL; 3677 if (mreq->mr_alen > dev->addr_len) 3678 goto done; 3679 3680 err = -ENOBUFS; 3681 i = kmalloc(sizeof(*i), GFP_KERNEL); 3682 if (i == NULL) 3683 goto done; 3684 3685 err = 0; 3686 for (ml = po->mclist; ml; ml = ml->next) { 3687 if (ml->ifindex == mreq->mr_ifindex && 3688 ml->type 
== mreq->mr_type && 3689 ml->alen == mreq->mr_alen && 3690 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3691 ml->count++; 3692 /* Free the new element ... */ 3693 kfree(i); 3694 goto done; 3695 } 3696 } 3697 3698 i->type = mreq->mr_type; 3699 i->ifindex = mreq->mr_ifindex; 3700 i->alen = mreq->mr_alen; 3701 memcpy(i->addr, mreq->mr_address, i->alen); 3702 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3703 i->count = 1; 3704 i->next = po->mclist; 3705 po->mclist = i; 3706 err = packet_dev_mc(dev, i, 1); 3707 if (err) { 3708 po->mclist = i->next; 3709 kfree(i); 3710 } 3711 3712 done: 3713 rtnl_unlock(); 3714 return err; 3715 } 3716 3717 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 3718 { 3719 struct packet_mclist *ml, **mlp; 3720 3721 rtnl_lock(); 3722 3723 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 3724 if (ml->ifindex == mreq->mr_ifindex && 3725 ml->type == mreq->mr_type && 3726 ml->alen == mreq->mr_alen && 3727 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3728 if (--ml->count == 0) { 3729 struct net_device *dev; 3730 *mlp = ml->next; 3731 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3732 if (dev) 3733 packet_dev_mc(dev, ml, -1); 3734 kfree(ml); 3735 } 3736 break; 3737 } 3738 } 3739 rtnl_unlock(); 3740 return 0; 3741 } 3742 3743 static void packet_flush_mclist(struct sock *sk) 3744 { 3745 struct packet_sock *po = pkt_sk(sk); 3746 struct packet_mclist *ml; 3747 3748 if (!po->mclist) 3749 return; 3750 3751 rtnl_lock(); 3752 while ((ml = po->mclist) != NULL) { 3753 struct net_device *dev; 3754 3755 po->mclist = ml->next; 3756 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3757 if (dev != NULL) 3758 packet_dev_mc(dev, ml, -1); 3759 kfree(ml); 3760 } 3761 rtnl_unlock(); 3762 } 3763 3764 static int 3765 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, 3766 unsigned int optlen) 3767 { 3768 struct sock *sk = sock->sk; 3769 struct packet_sock *po = pkt_sk(sk); 3770 int ret; 3771 3772 if (level != SOL_PACKET) 3773 return -ENOPROTOOPT; 3774 3775 switch (optname) { 3776 case PACKET_ADD_MEMBERSHIP: 3777 case PACKET_DROP_MEMBERSHIP: 3778 { 3779 struct packet_mreq_max mreq; 3780 int len = optlen; 3781 memset(&mreq, 0, sizeof(mreq)); 3782 if (len < sizeof(struct packet_mreq)) 3783 return -EINVAL; 3784 if (len > sizeof(mreq)) 3785 len = sizeof(mreq); 3786 if (copy_from_sockptr(&mreq, optval, len)) 3787 return -EFAULT; 3788 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 3789 return -EINVAL; 3790 if (optname == PACKET_ADD_MEMBERSHIP) 3791 ret = packet_mc_add(sk, &mreq); 3792 else 3793 ret = packet_mc_drop(sk, &mreq); 3794 return ret; 3795 } 3796 3797 case PACKET_RX_RING: 3798 case PACKET_TX_RING: 3799 { 3800 union tpacket_req_u req_u; 3801 3802 ret = -EINVAL; 3803 lock_sock(sk); 3804 switch (po->tp_version) { 3805 case TPACKET_V1: 3806 case TPACKET_V2: 3807 if (optlen < sizeof(req_u.req)) 3808 break; 3809 ret = copy_from_sockptr(&req_u.req, optval, 3810 sizeof(req_u.req)) ? 3811 -EINVAL : 0; 3812 break; 3813 case TPACKET_V3: 3814 default: 3815 if (optlen < sizeof(req_u.req3)) 3816 break; 3817 ret = copy_from_sockptr(&req_u.req3, optval, 3818 sizeof(req_u.req3)) ? 
3819 -EINVAL : 0; 3820 break; 3821 } 3822 if (!ret) 3823 ret = packet_set_ring(sk, &req_u, 0, 3824 optname == PACKET_TX_RING); 3825 release_sock(sk); 3826 return ret; 3827 } 3828 case PACKET_COPY_THRESH: 3829 { 3830 int val; 3831 3832 if (optlen != sizeof(val)) 3833 return -EINVAL; 3834 if (copy_from_sockptr(&val, optval, sizeof(val))) 3835 return -EFAULT; 3836 3837 WRITE_ONCE(pkt_sk(sk)->copy_thresh, val); 3838 return 0; 3839 } 3840 case PACKET_VERSION: 3841 { 3842 int val; 3843 3844 if (optlen != sizeof(val)) 3845 return -EINVAL; 3846 if (copy_from_sockptr(&val, optval, sizeof(val))) 3847 return -EFAULT; 3848 switch (val) { 3849 case TPACKET_V1: 3850 case TPACKET_V2: 3851 case TPACKET_V3: 3852 break; 3853 default: 3854 return -EINVAL; 3855 } 3856 lock_sock(sk); 3857 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3858 ret = -EBUSY; 3859 } else { 3860 po->tp_version = val; 3861 ret = 0; 3862 } 3863 release_sock(sk); 3864 return ret; 3865 } 3866 case PACKET_RESERVE: 3867 { 3868 unsigned int val; 3869 3870 if (optlen != sizeof(val)) 3871 return -EINVAL; 3872 if (copy_from_sockptr(&val, optval, sizeof(val))) 3873 return -EFAULT; 3874 if (val > INT_MAX) 3875 return -EINVAL; 3876 lock_sock(sk); 3877 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3878 ret = -EBUSY; 3879 } else { 3880 po->tp_reserve = val; 3881 ret = 0; 3882 } 3883 release_sock(sk); 3884 return ret; 3885 } 3886 case PACKET_LOSS: 3887 { 3888 unsigned int val; 3889 3890 if (optlen != sizeof(val)) 3891 return -EINVAL; 3892 if (copy_from_sockptr(&val, optval, sizeof(val))) 3893 return -EFAULT; 3894 3895 lock_sock(sk); 3896 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3897 ret = -EBUSY; 3898 } else { 3899 packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val); 3900 ret = 0; 3901 } 3902 release_sock(sk); 3903 return ret; 3904 } 3905 case PACKET_AUXDATA: 3906 { 3907 int val; 3908 3909 if (optlen < sizeof(val)) 3910 return -EINVAL; 3911 if (copy_from_sockptr(&val, optval, sizeof(val))) 3912 return -EFAULT; 3913 3914 packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val); 3915 return 0; 3916 } 3917 case PACKET_ORIGDEV: 3918 { 3919 int val; 3920 3921 if (optlen < sizeof(val)) 3922 return -EINVAL; 3923 if (copy_from_sockptr(&val, optval, sizeof(val))) 3924 return -EFAULT; 3925 3926 packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val); 3927 return 0; 3928 } 3929 case PACKET_VNET_HDR: 3930 case PACKET_VNET_HDR_SZ: 3931 { 3932 int val, hdr_len; 3933 3934 if (sock->type != SOCK_RAW) 3935 return -EINVAL; 3936 if (optlen < sizeof(val)) 3937 return -EINVAL; 3938 if (copy_from_sockptr(&val, optval, sizeof(val))) 3939 return -EFAULT; 3940 3941 if (optname == PACKET_VNET_HDR_SZ) { 3942 if (val && val != sizeof(struct virtio_net_hdr) && 3943 val != sizeof(struct virtio_net_hdr_mrg_rxbuf)) 3944 return -EINVAL; 3945 hdr_len = val; 3946 } else { 3947 hdr_len = val ? 
sizeof(struct virtio_net_hdr) : 0; 3948 } 3949 lock_sock(sk); 3950 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3951 ret = -EBUSY; 3952 } else { 3953 WRITE_ONCE(po->vnet_hdr_sz, hdr_len); 3954 ret = 0; 3955 } 3956 release_sock(sk); 3957 return ret; 3958 } 3959 case PACKET_TIMESTAMP: 3960 { 3961 int val; 3962 3963 if (optlen != sizeof(val)) 3964 return -EINVAL; 3965 if (copy_from_sockptr(&val, optval, sizeof(val))) 3966 return -EFAULT; 3967 3968 WRITE_ONCE(po->tp_tstamp, val); 3969 return 0; 3970 } 3971 case PACKET_FANOUT: 3972 { 3973 struct fanout_args args = { 0 }; 3974 3975 if (optlen != sizeof(int) && optlen != sizeof(args)) 3976 return -EINVAL; 3977 if (copy_from_sockptr(&args, optval, optlen)) 3978 return -EFAULT; 3979 3980 return fanout_add(sk, &args); 3981 } 3982 case PACKET_FANOUT_DATA: 3983 { 3984 /* Paired with the WRITE_ONCE() in fanout_add() */ 3985 if (!READ_ONCE(po->fanout)) 3986 return -EINVAL; 3987 3988 return fanout_set_data(po, optval, optlen); 3989 } 3990 case PACKET_IGNORE_OUTGOING: 3991 { 3992 int val; 3993 3994 if (optlen != sizeof(val)) 3995 return -EINVAL; 3996 if (copy_from_sockptr(&val, optval, sizeof(val))) 3997 return -EFAULT; 3998 if (val < 0 || val > 1) 3999 return -EINVAL; 4000 4001 WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val); 4002 return 0; 4003 } 4004 case PACKET_TX_HAS_OFF: 4005 { 4006 unsigned int val; 4007 4008 if (optlen != sizeof(val)) 4009 return -EINVAL; 4010 if (copy_from_sockptr(&val, optval, sizeof(val))) 4011 return -EFAULT; 4012 4013 lock_sock(sk); 4014 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec) 4015 packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val); 4016 4017 release_sock(sk); 4018 return 0; 4019 } 4020 case PACKET_QDISC_BYPASS: 4021 { 4022 int val; 4023 4024 if (optlen != sizeof(val)) 4025 return -EINVAL; 4026 if (copy_from_sockptr(&val, optval, sizeof(val))) 4027 return -EFAULT; 4028 4029 packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val); 4030 return 0; 4031 } 4032 default: 4033 return -ENOPROTOOPT; 4034 } 4035 } 4036 4037 static int packet_getsockopt(struct socket *sock, int level, int optname, 4038 char __user *optval, int __user *optlen) 4039 { 4040 int len; 4041 int val, lv = sizeof(val); 4042 struct sock *sk = sock->sk; 4043 struct packet_sock *po = pkt_sk(sk); 4044 void *data = &val; 4045 union tpacket_stats_u st; 4046 struct tpacket_rollover_stats rstats; 4047 int drops; 4048 4049 if (level != SOL_PACKET) 4050 return -ENOPROTOOPT; 4051 4052 if (get_user(len, optlen)) 4053 return -EFAULT; 4054 4055 if (len < 0) 4056 return -EINVAL; 4057 4058 switch (optname) { 4059 case PACKET_STATISTICS: 4060 spin_lock_bh(&sk->sk_receive_queue.lock); 4061 memcpy(&st, &po->stats, sizeof(st)); 4062 memset(&po->stats, 0, sizeof(po->stats)); 4063 spin_unlock_bh(&sk->sk_receive_queue.lock); 4064 drops = atomic_xchg(&po->tp_drops, 0); 4065 4066 if (po->tp_version == TPACKET_V3) { 4067 lv = sizeof(struct tpacket_stats_v3); 4068 st.stats3.tp_drops = drops; 4069 st.stats3.tp_packets += drops; 4070 data = &st.stats3; 4071 } else { 4072 lv = sizeof(struct tpacket_stats); 4073 st.stats1.tp_drops = drops; 4074 st.stats1.tp_packets += drops; 4075 data = &st.stats1; 4076 } 4077 4078 break; 4079 case PACKET_AUXDATA: 4080 val = packet_sock_flag(po, PACKET_SOCK_AUXDATA); 4081 break; 4082 case PACKET_ORIGDEV: 4083 val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV); 4084 break; 4085 case PACKET_VNET_HDR: 4086 val = !!READ_ONCE(po->vnet_hdr_sz); 4087 break; 4088 case PACKET_VNET_HDR_SZ: 4089 val = READ_ONCE(po->vnet_hdr_sz); 4090 break; 4091 case 
PACKET_COPY_THRESH: 4092 val = READ_ONCE(pkt_sk(sk)->copy_thresh); 4093 break; 4094 case PACKET_VERSION: 4095 val = po->tp_version; 4096 break; 4097 case PACKET_HDRLEN: 4098 if (len > sizeof(int)) 4099 len = sizeof(int); 4100 if (len < sizeof(int)) 4101 return -EINVAL; 4102 if (copy_from_user(&val, optval, len)) 4103 return -EFAULT; 4104 switch (val) { 4105 case TPACKET_V1: 4106 val = sizeof(struct tpacket_hdr); 4107 break; 4108 case TPACKET_V2: 4109 val = sizeof(struct tpacket2_hdr); 4110 break; 4111 case TPACKET_V3: 4112 val = sizeof(struct tpacket3_hdr); 4113 break; 4114 default: 4115 return -EINVAL; 4116 } 4117 break; 4118 case PACKET_RESERVE: 4119 val = po->tp_reserve; 4120 break; 4121 case PACKET_LOSS: 4122 val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS); 4123 break; 4124 case PACKET_TIMESTAMP: 4125 val = READ_ONCE(po->tp_tstamp); 4126 break; 4127 case PACKET_FANOUT: 4128 val = (po->fanout ? 4129 ((u32)po->fanout->id | 4130 ((u32)po->fanout->type << 16) | 4131 ((u32)po->fanout->flags << 24)) : 4132 0); 4133 break; 4134 case PACKET_IGNORE_OUTGOING: 4135 val = READ_ONCE(po->prot_hook.ignore_outgoing); 4136 break; 4137 case PACKET_ROLLOVER_STATS: 4138 if (!po->rollover) 4139 return -EINVAL; 4140 rstats.tp_all = atomic_long_read(&po->rollover->num); 4141 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); 4142 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); 4143 data = &rstats; 4144 lv = sizeof(rstats); 4145 break; 4146 case PACKET_TX_HAS_OFF: 4147 val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF); 4148 break; 4149 case PACKET_QDISC_BYPASS: 4150 val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS); 4151 break; 4152 default: 4153 return -ENOPROTOOPT; 4154 } 4155 4156 if (len > lv) 4157 len = lv; 4158 if (put_user(len, optlen)) 4159 return -EFAULT; 4160 if (copy_to_user(optval, data, len)) 4161 return -EFAULT; 4162 return 0; 4163 } 4164 4165 static int packet_notifier(struct notifier_block *this, 4166 unsigned long msg, void *ptr) 4167 { 4168 struct sock *sk; 4169 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4170 struct net *net = dev_net(dev); 4171 4172 rcu_read_lock(); 4173 sk_for_each_rcu(sk, &net->packet.sklist) { 4174 struct packet_sock *po = pkt_sk(sk); 4175 4176 switch (msg) { 4177 case NETDEV_UNREGISTER: 4178 if (po->mclist) 4179 packet_dev_mclist_delete(dev, &po->mclist); 4180 fallthrough; 4181 4182 case NETDEV_DOWN: 4183 if (dev->ifindex == po->ifindex) { 4184 spin_lock(&po->bind_lock); 4185 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) { 4186 __unregister_prot_hook(sk, false); 4187 sk->sk_err = ENETDOWN; 4188 if (!sock_flag(sk, SOCK_DEAD)) 4189 sk_error_report(sk); 4190 } 4191 if (msg == NETDEV_UNREGISTER) { 4192 packet_cached_dev_reset(po); 4193 WRITE_ONCE(po->ifindex, -1); 4194 netdev_put(po->prot_hook.dev, 4195 &po->prot_hook.dev_tracker); 4196 po->prot_hook.dev = NULL; 4197 } 4198 spin_unlock(&po->bind_lock); 4199 } 4200 break; 4201 case NETDEV_UP: 4202 if (dev->ifindex == po->ifindex) { 4203 spin_lock(&po->bind_lock); 4204 if (po->num) 4205 register_prot_hook(sk); 4206 spin_unlock(&po->bind_lock); 4207 } 4208 break; 4209 } 4210 } 4211 rcu_read_unlock(); 4212 return NOTIFY_DONE; 4213 } 4214 4215 4216 static int packet_ioctl(struct socket *sock, unsigned int cmd, 4217 unsigned long arg) 4218 { 4219 struct sock *sk = sock->sk; 4220 4221 switch (cmd) { 4222 case SIOCOUTQ: 4223 { 4224 int amount = sk_wmem_alloc_get(sk); 4225 4226 return put_user(amount, (int __user *)arg); 4227 } 4228 case SIOCINQ: 4229 { 4230 struct sk_buff *skb; 4231 int 
static int packet_notifier(struct notifier_block *this,
			   unsigned long msg, void *ptr)
{
	struct sock *sk;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist_delete(dev, &po->mclist);
			fallthrough;

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					packet_cached_dev_reset(po);
					WRITE_ONCE(po->ifindex, -1);
					netdev_put(po->prot_hook.dev,
						   &po->prot_hook.dev_tracker);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}


static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

static __poll_t packet_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	__poll_t mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
					      TP_STATUS_KERNEL))
			mask |= EPOLLIN | EPOLLRDNORM;
	}
	packet_rcv_try_clear_pressure(po);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
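/* With an RX ring mapped, EPOLLIN is only reported once at least one frame
 * has been handed over to userspace.  A simplified TPACKET_V2 consumer loop
 * (illustrative sketch; "ring", "idx", "frame_size" and "frame_nr" are
 * userspace variables matching the tpacket_req used to size the ring, the
 * block size is assumed to be a multiple of the frame size so frames tile
 * the mapping, and real code also needs the appropriate memory barriers):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	struct tpacket2_hdr *hdr = ring + idx * frame_size;
 *	while (hdr->tp_status & TP_STATUS_USER) {
 *		... consume hdr->tp_len bytes at (u8 *)hdr + hdr->tp_mac ...
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *		idx = (idx + 1) % frame_nr;
 *		hdr = ring + idx * frame_size;
 *	}
 */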
/* Dirty? Well, I still did not learn better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_long_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_long_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open = packet_mm_open,
	.close = packet_mm_close,
};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
	if (buffer)
		return buffer;

	/* vmalloc failed, lets dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
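/* packet_set_ring() below is where PACKET_RX_RING/PACKET_TX_RING (and ring
 * teardown on close) end up.  Roughly: validate the requested geometry,
 * allocate the block vector, unhook the protocol handler and
 * synchronize_net() so no packets are in flight, swap the old and new ring
 * under pg_vec_lock and the queue lock, then re-register the hook.  The
 * swap is refused with -EBUSY while the old ring is still mmap()ed.
 */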
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long *rx_owner_map = NULL;
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;
	/* Added to avoid minimal code churn */
	struct tpacket_req *req = &req_u->req;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_long_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		unsigned int min_frame_size;

		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		min_frame_size = po->tp_hdrlen + po->tp_reserve;
		if (po->tp_version >= TPACKET_V3 &&
		    req->tp_block_size <
		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
			goto out;
		if (unlikely(req->tp_frame_size < min_frame_size))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Block transmit is not supported yet */
			if (!tx_ring) {
				init_prb_bdqc(po, rb, pg_vec, req_u);
			} else {
				struct tpacket_req3 *req3 = &req_u->req3;

				if (req3->tp_retire_blk_tov ||
				    req3->tp_sizeof_priv ||
				    req3->tp_feature_req_word) {
					err = -EINVAL;
					goto out_free_pg_vec;
				}
			}
			break;
		default:
			if (!tx_ring) {
				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
				if (!rx_owner_map)
					goto out_free_pg_vec;
			}
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}


	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
	num = po->num;
	if (was_running) {
		WRITE_ONCE(po->num, 0);
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_long_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		if (po->tp_version <= TPACKET_V2)
			swap(rb->rx_owner_map, rx_owner_map);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_long_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %ld\n",
			       atomic_long_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		WRITE_ONCE(po->num, num);
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

out_free_pg_vec:
	if (pg_vec) {
		bitmap_free(rx_owner_map);
		free_pg_vec(pg_vec, order, req->tp_block_nr);
	}
out:
	return err;
}

static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_long_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
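/* packet_mmap() above insists on vm_pgoff == 0 and on a length that exactly
 * covers every configured ring; RX and TX (when both exist) are mapped
 * back-to-back by a single mmap() call, RX first.  A simplified userspace
 * sketch (sizes are illustrative and assume 4 KiB pages):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size	= 1 << 12,
 *		.tp_block_nr	= 64,
 *		.tp_frame_size	= 1 << 11,
 *		.tp_frame_nr	= 128,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */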
static const struct proto_ops packet_ops_spkt = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind_spkt,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname_spkt,
	.poll = datagram_poll,
	.ioctl = packet_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = packet_sendmsg_spkt,
	.recvmsg = packet_recvmsg,
	.mmap = sock_no_mmap,
};

static const struct proto_ops packet_ops = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname,
	.poll = packet_poll,
	.ioctl = packet_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = packet_setsockopt,
	.getsockopt = packet_getsockopt,
	.sendmsg = packet_sendmsg,
	.recvmsg = packet_recvmsg,
	.mmap = packet_mmap,
};

static const struct net_proto_family packet_family_ops = {
	.family = PF_PACKET,
	.create = packet_create,
	.owner = THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "%*sRefCnt Type Proto Iface R Rmem User Inode\n",
			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(READ_ONCE(po->num)),
			   READ_ONCE(po->ifindex),
			   packet_sock_flag(po, PACKET_SOCK_RUNNING),
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
}
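/* Registration order below is pernet ops, netdevice notifier, proto, then
 * the socket family; each failure path unwinds whatever was already
 * registered, in reverse.
 */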
static int __init packet_init(void)
{
	int rc;

	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;
	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out_notifier;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;

	return 0;

out_proto:
	proto_unregister(&packet_proto);
out_notifier:
	unregister_netdevice_notifier(&packet_netdev_notifier);
out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_DESCRIPTION("Packet socket support (AF_PACKET)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);