// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov	:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski	:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>
#include <linux/netfilter_netdev.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - packet socket receives packets with pulled ll header,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

In summary:
   If dev_has_header(dev) == false we are unable to restore the ll header,
   because it is invisible to us.


On transmit:
------------

dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

dev_has_header(dev) == false (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We should set network_header on output to the correct position,
   packet classifier depends on it.
 */
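
/*
 * Illustrative userspace sketch (an assumption added for clarity, not
 * part of this file): the mac_header/data rules above are what make the
 * ll header visible on a SOCK_RAW packet socket but not on a SOCK_DGRAM
 * one.
 *
 *	// hypothetical capture setup; error handling omitted
 *	int raw   = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	// read() on 'raw' returns the frame starting at the link-layer
 *	// header; read() on 'dgram' starts at the network header.
 */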

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
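
/*
 * Userspace sketch (illustrative, not part of this file): the plain
 * struct packet_mreq that this kernel-side struct mirrors is commonly
 * used to enable promiscuous mode on an interface:
 *
 *	struct packet_mreq mr = {
 *		.mr_ifindex = ifindex,	// assumed to be known
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mr, sizeof(mr));
 */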

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

#ifdef CONFIG_NETFILTER_EGRESS
static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
{
	struct sk_buff *next, *head = NULL, *tail;
	int rc;

	rcu_read_lock();
	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);

		if (!nf_hook_egress(skb, &rc, skb->dev))
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;

		tail = skb;
	}
	rcu_read_unlock();

	return head;
}
#endif

static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
{
	if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
		return dev_queue_xmit(skb);

#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_hook_egress_active()) {
		skb = nf_hook_direct_egress(skb);
		if (!skb)
			return NET_XMIT_DROP;
	}
#endif
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}
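
/*
 * Userspace sketch (illustrative): the PACKET_SOCK_QDISC_BYPASS flag
 * consulted in packet_xmit() above is set via the PACKET_QDISC_BYPASS
 * socket option, after which transmitted frames skip the qdisc and are
 * handed to the driver through dev_direct_xmit():
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 */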

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	/* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		WRITE_ONCE(h.h1->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		WRITE_ONCE(h.h2->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		WRITE_ONCE(h.h3->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	/* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return READ_ONCE(h.h1->tp_status);
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return READ_ONCE(h.h2->tp_status);
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return READ_ONCE(h.h3->tp_status);
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec64_cond(skb_tstamp(skb), ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}
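
/*
 * Userspace sketch (illustrative): the 'flags' argument above comes from
 * the PACKET_TIMESTAMP socket option, e.g. asking for NIC hardware
 * timestamps in the ring frames:
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 */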

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is so slow, you don't really
	 * need to worry about perf anyway
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}
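
/*
 * Worked example of the computation above (assumed numbers): for a
 * 1 MiB block, mbits = (1048576 * 8) / (1024 * 1024) = 8. On a 1 Gbit/s
 * link, div = 1000 / 1000 = 1, so the timeout is 8 / 1 + 1 = 9 msecs,
 * i.e. roughly the ~8 ms it takes to fill one block, plus slack.
 */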

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start = pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks = req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 * timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close. So we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we would lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}
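
/*
 * Userspace half of the freeze/thaw protocol (illustrative sketch, not
 * part of this file): a reader returns a block to the kernel by
 * resetting its status, which is what allows a frozen queue to thaw on
 * a later lookup or timer tick:
 *
 *	struct tpacket_block_desc *pbd;	// points into the mmap()ed ring
 *	// ... walk pbd->hdr.bh1.num_pkts packets in the block ...
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// hand back
 */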

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			/* Waiting for skb_copy_bits to finish... */
			write_lock(&pkc->blk_fill_in_prog_lock);
			write_unlock(&pkc->blk_fill_in_prog_lock);
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	__releases(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	read_unlock(&pkc->blk_fill_in_prog_lock);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
	__acquires(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	read_lock(&pkc->blk_fill_in_prog_lock);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len
					    )
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if the last block, which caused the queue to freeze,
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					     struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.frame_max) + 1;
	idx = READ_ONCE(po->rx_ring.head);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
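
/*
 * Example of the pow_off probe above (assumed numbers): with
 * len = 64 frames, head = 60 and pow_off = ROOM_POW_OFF = 2, we test
 * slot 60 + (64 >> 2) = 76, which wraps to 12. If that frame is still
 * TP_STATUS_KERNEL, at least a quarter of the ring is free and the
 * caller can report ROOM_NORMAL.
 */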

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(const struct packet_sock *po,
				 const struct sk_buff *skb)
{
	const struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
				   - (skb ? skb->truesize : 0);

		if (avail > (rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	bool pressure;
	int ret;

	ret = __packet_rcv_has_room(po, skb);
	pressure = ret != ROOM_NORMAL;

	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure)
		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure);

	return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) &&
	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false);
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 *history = po->rollover->history;
	u32 victim, rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (READ_ONCE(history[i]) == rxhash)
			count++;

	victim = get_random_u32_below(ROLLOVER_HLEN);

	/* Avoid dirtying the cache line if possible */
	if (READ_ONCE(history[victim]) != rxhash)
		WRITE_ONCE(history[victim], rxhash);

	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return get_random_u32_below(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(rcu_dereference(f->arr[idx]));

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(rcu_dereference(f->arr[i]));
		if (po_next != po_skip &&
		    !packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(rcu_dereference(f->arr[idx]));
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
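
/*
 * Userspace sketch (illustrative): sockets join a fanout group with
 * PACKET_FANOUT, encoding the group id in the low 16 bits and the mode
 * in the high bits; packet_rcv_fanout() above then demuxes between them:
 *
 *	int val = group_id | (PACKET_FANOUT_HASH << 16);  // group_id assumed
 *	setsockopt(fd1, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *	setsockopt(fd2, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 */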

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	rcu_assign_pointer(f->arr[f->num_members], sk);
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (rcu_dereference_protected(f->arr[i],
					      lockdep_is_held(&f->lock)) == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	rcu_assign_pointer(f->arr[i],
			   rcu_dereference_protected(f->arr[f->num_members - 1],
						     lockdep_is_held(&f->lock)));
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	ret = copy_bpf_fprog_from_user(&fprog, data, len);
	if (ret)
		return ret;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_sockptr(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, sockptr_t data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}
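
/*
 * Userspace sketch (illustrative): a classic BPF program reaches
 * fanout_set_data_cbpf() above through PACKET_FANOUT_DATA, once the
 * socket is already in a PACKET_FANOUT_CBPF group:
 *
 *	struct sock_filter insns[] = { ... };	// program body elided
 *	struct sock_fprog fprog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &fprog, sizeof(fprog));
 *
 * The program's return value, taken modulo the group size, selects the
 * receiving socket.
 */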

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	}
}

static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
	struct packet_fanout *f;

	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == candidate_id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			return false;
		}
	}
	return true;
}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
	u16 id = fanout_next_id;

	do {
		if (__fanout_id_is_free(sk, id)) {
			*new_id = id;
			fanout_next_id = id + 1;
			return true;
		}

		id++;
	} while (id != fanout_next_id);

	return false;
}

static int fanout_add(struct sock *sk, struct fanout_args *args)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	u16 type_flags = args->type_flags;
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	u16 id = args->id;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
		break;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
	}

	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
		if (id != 0) {
			err = -EINVAL;
			goto out;
		}
		if (!fanout_find_new_id(sk, &id)) {
			err = -ENOMEM;
			goto out;
		}
		/* ephemeral flag for the first socket in the group: drop it */
		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
	}

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match) {
		if (match->flags != flags)
			goto out;
		if (args->max_num_members &&
		    args->max_num_members != match->max_num_members)
			goto out;
	} else {
		if (args->max_num_members > PACKET_FANOUT_MAX)
			goto out;
		if (!args->max_num_members)
			/* legacy PACKET_FANOUT_MAX */
			args->max_num_members = 256;
		err = -ENOMEM;
		match = kvzalloc(struct_size(match, arr, args->max_num_members),
				 GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		refcount_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.af_packet_net = read_pnet(&match->net);
		match->prot_hook.id_match = match_fanout_group;
		match->max_num_members = args->max_num_members;
		match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (packet_sock_flag(po, PACKET_SOCK_RUNNING) &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (refcount_read(&match->sk_ref) < match->max_num_members) {
			__dev_remove_pack(&po->prot_hook);

			/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
			WRITE_ONCE(po->fanout, match);

			po->rollover = rollover;
			rollover = NULL;
			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !refcount_read(&match->sk_ref)) {
		list_del(&match->list);
		kvfree(match);
	}

out:
	kfree(rollover);
	mutex_unlock(&fanout_mutex);
	return err;
}

/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net())
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (refcount_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is a no-op.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset_ct(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets from using up all the memory.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
	int depth;

	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
	    sock->type == SOCK_RAW) {
		skb_reset_mac_header(skb);
		skb->protocol = dev_parse_header_protocol(skb);
	}

	/* Move network header to the right position for VLAN tagged packets */
	if (likely(skb->dev->type == ARPHRD_ETHER) &&
	    eth_type_vlan(skb->protocol) &&
	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	skb_probe_transport_header(skb);
}

/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = READ_ONCE(sk->sk_priority);
	skb->mark = READ_ONCE(sk->sk_mark);
	skb->tstamp = sockc.transmit_time;
	skb->mono_delivery_time = !!skb->tstamp;
	skb_setup_tx_timestamp(skb, sockc.tsflags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	packet_parse_headers(skb, sock);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
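
/*
 * Legacy SOCK_PACKET transmit sketch (illustrative, not part of this
 * file): the address names the device, and a full msg_namelen also
 * carries the protocol, matching the checks at the top of
 * packet_sendmsg_spkt():
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy(spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */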
2021 */ 2022 skb_reserve(skb, reserved); 2023 skb_reset_network_header(skb); 2024 2025 /* Try to align data part correctly */ 2026 if (hhlen) { 2027 skb->data -= hhlen; 2028 skb->tail -= hhlen; 2029 if (len < hhlen) 2030 skb_reset_network_header(skb); 2031 } 2032 err = memcpy_from_msg(skb_put(skb, len), msg, len); 2033 if (err) 2034 goto out_free; 2035 goto retry; 2036 } 2037 2038 if (!dev_validate_header(dev, skb->data, len) || !skb->len) { 2039 err = -EINVAL; 2040 goto out_unlock; 2041 } 2042 if (len > (dev->mtu + dev->hard_header_len + extra_len) && 2043 !packet_extra_vlan_len_allowed(dev, skb)) { 2044 err = -EMSGSIZE; 2045 goto out_unlock; 2046 } 2047 2048 sockcm_init(&sockc, sk); 2049 if (msg->msg_controllen) { 2050 err = sock_cmsg_send(sk, msg, &sockc); 2051 if (unlikely(err)) 2052 goto out_unlock; 2053 } 2054 2055 skb->protocol = proto; 2056 skb->dev = dev; 2057 skb->priority = READ_ONCE(sk->sk_priority); 2058 skb->mark = READ_ONCE(sk->sk_mark); 2059 skb->tstamp = sockc.transmit_time; 2060 skb->mono_delivery_time = !!skb->tstamp; 2061 skb_setup_tx_timestamp(skb, sockc.tsflags); 2062 2063 if (unlikely(extra_len == 4)) 2064 skb->no_fcs = 1; 2065 2066 packet_parse_headers(skb, sock); 2067 2068 dev_queue_xmit(skb); 2069 rcu_read_unlock(); 2070 return len; 2071 2072 out_unlock: 2073 rcu_read_unlock(); 2074 out_free: 2075 kfree_skb(skb); 2076 return err; 2077 } 2078 2079 static unsigned int run_filter(struct sk_buff *skb, 2080 const struct sock *sk, 2081 unsigned int res) 2082 { 2083 struct sk_filter *filter; 2084 2085 rcu_read_lock(); 2086 filter = rcu_dereference(sk->sk_filter); 2087 if (filter != NULL) 2088 res = bpf_prog_run_clear_cb(filter->prog, skb); 2089 rcu_read_unlock(); 2090 2091 return res; 2092 } 2093 2094 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, 2095 size_t *len, int vnet_hdr_sz) 2096 { 2097 struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 }; 2098 2099 if (*len < vnet_hdr_sz) 2100 return -EINVAL; 2101 *len -= vnet_hdr_sz; 2102 2103 if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0)) 2104 return -EINVAL; 2105 2106 return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz); 2107 } 2108 2109 /* 2110 * This function makes lazy skb cloning in hope that most of packets 2111 * are discarded by BPF. 2112 * 2113 * Note tricky part: we DO mangle shared skb! skb->data, skb->len 2114 * and skb->cb are mangled. It works because (and until) packets 2115 * falling here are owned by current CPU. Output packets are cloned 2116 * by dev_queue_xmit_nit(), input packets are processed by net_bh 2117 * sequentially, so that if we return skb to original state on exit, 2118 * we will not harm anyone. 2119 */ 2120 2121 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, 2122 struct packet_type *pt, struct net_device *orig_dev) 2123 { 2124 enum skb_drop_reason drop_reason = SKB_CONSUMED; 2125 struct sock *sk; 2126 struct sockaddr_ll *sll; 2127 struct packet_sock *po; 2128 u8 *skb_head = skb->data; 2129 int skb_len = skb->len; 2130 unsigned int snaplen, res; 2131 2132 if (skb->pkt_type == PACKET_LOOPBACK) 2133 goto drop; 2134 2135 sk = pt->af_packet_priv; 2136 po = pkt_sk(sk); 2137 2138 if (!net_eq(dev_net(dev), sock_net(sk))) 2139 goto drop; 2140 2141 skb->dev = dev; 2142 2143 if (dev_has_header(dev)) { 2144 /* The device has an explicit notion of ll header, 2145 * exported to higher levels. 
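 * For SOCK_RAW the ll header is pushed back below so the user
 * sees the whole frame; SOCK_DGRAM leaves it pulled.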
2146 * 2147 * Otherwise, the device hides details of its frame 2148 * structure, so that corresponding packet head is 2149 * never delivered to user. 2150 */ 2151 if (sk->sk_type != SOCK_DGRAM) 2152 skb_push(skb, skb->data - skb_mac_header(skb)); 2153 else if (skb->pkt_type == PACKET_OUTGOING) { 2154 /* Special case: outgoing packets have ll header at head */ 2155 skb_pull(skb, skb_network_offset(skb)); 2156 } 2157 } 2158 2159 snaplen = skb->len; 2160 2161 res = run_filter(skb, sk, snaplen); 2162 if (!res) 2163 goto drop_n_restore; 2164 if (snaplen > res) 2165 snaplen = res; 2166 2167 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 2168 goto drop_n_acct; 2169 2170 if (skb_shared(skb)) { 2171 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2172 if (nskb == NULL) 2173 goto drop_n_acct; 2174 2175 if (skb_head != skb->data) { 2176 skb->data = skb_head; 2177 skb->len = skb_len; 2178 } 2179 consume_skb(skb); 2180 skb = nskb; 2181 } 2182 2183 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); 2184 2185 sll = &PACKET_SKB_CB(skb)->sa.ll; 2186 sll->sll_hatype = dev->type; 2187 sll->sll_pkttype = skb->pkt_type; 2188 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) 2189 sll->sll_ifindex = orig_dev->ifindex; 2190 else 2191 sll->sll_ifindex = dev->ifindex; 2192 2193 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2194 2195 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). 2196 * Use their space for storing the original skb length. 2197 */ 2198 PACKET_SKB_CB(skb)->sa.origlen = skb->len; 2199 2200 if (pskb_trim(skb, snaplen)) 2201 goto drop_n_acct; 2202 2203 skb_set_owner_r(skb, sk); 2204 skb->dev = NULL; 2205 skb_dst_drop(skb); 2206 2207 /* drop conntrack reference */ 2208 nf_reset_ct(skb); 2209 2210 spin_lock(&sk->sk_receive_queue.lock); 2211 po->stats.stats1.tp_packets++; 2212 sock_skb_set_dropcount(sk, skb); 2213 skb_clear_delivery_time(skb); 2214 __skb_queue_tail(&sk->sk_receive_queue, skb); 2215 spin_unlock(&sk->sk_receive_queue.lock); 2216 sk->sk_data_ready(sk); 2217 return 0; 2218 2219 drop_n_acct: 2220 atomic_inc(&po->tp_drops); 2221 atomic_inc(&sk->sk_drops); 2222 drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; 2223 2224 drop_n_restore: 2225 if (skb_head != skb->data && skb_shared(skb)) { 2226 skb->data = skb_head; 2227 skb->len = skb_len; 2228 } 2229 drop: 2230 kfree_skb_reason(skb, drop_reason); 2231 return 0; 2232 } 2233 2234 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 2235 struct packet_type *pt, struct net_device *orig_dev) 2236 { 2237 enum skb_drop_reason drop_reason = SKB_CONSUMED; 2238 struct sock *sk; 2239 struct packet_sock *po; 2240 struct sockaddr_ll *sll; 2241 union tpacket_uhdr h; 2242 u8 *skb_head = skb->data; 2243 int skb_len = skb->len; 2244 unsigned int snaplen, res; 2245 unsigned long status = TP_STATUS_USER; 2246 unsigned short macoff, hdrlen; 2247 unsigned int netoff; 2248 struct sk_buff *copy_skb = NULL; 2249 struct timespec64 ts; 2250 __u32 ts_status; 2251 unsigned int slot_id = 0; 2252 int vnet_hdr_sz = 0; 2253 2254 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 2255 * We may add members to them until current aligned size without forcing 2256 * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
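 * The BUILD_BUG_ON()s below pin the padded sizes at 32 and 48
 * bytes; growing either struct past that is a user ABI break.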
2257 */ 2258 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); 2259 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); 2260 2261 if (skb->pkt_type == PACKET_LOOPBACK) 2262 goto drop; 2263 2264 sk = pt->af_packet_priv; 2265 po = pkt_sk(sk); 2266 2267 if (!net_eq(dev_net(dev), sock_net(sk))) 2268 goto drop; 2269 2270 if (dev_has_header(dev)) { 2271 if (sk->sk_type != SOCK_DGRAM) 2272 skb_push(skb, skb->data - skb_mac_header(skb)); 2273 else if (skb->pkt_type == PACKET_OUTGOING) { 2274 /* Special case: outgoing packets have ll header at head */ 2275 skb_pull(skb, skb_network_offset(skb)); 2276 } 2277 } 2278 2279 snaplen = skb->len; 2280 2281 res = run_filter(skb, sk, snaplen); 2282 if (!res) 2283 goto drop_n_restore; 2284 2285 /* If we are flooded, just give up */ 2286 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) { 2287 atomic_inc(&po->tp_drops); 2288 goto drop_n_restore; 2289 } 2290 2291 if (skb->ip_summed == CHECKSUM_PARTIAL) 2292 status |= TP_STATUS_CSUMNOTREADY; 2293 else if (skb->pkt_type != PACKET_OUTGOING && 2294 skb_csum_unnecessary(skb)) 2295 status |= TP_STATUS_CSUM_VALID; 2296 if (skb_is_gso(skb) && skb_is_gso_tcp(skb)) 2297 status |= TP_STATUS_GSO_TCP; 2298 2299 if (snaplen > res) 2300 snaplen = res; 2301 2302 if (sk->sk_type == SOCK_DGRAM) { 2303 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 2304 po->tp_reserve; 2305 } else { 2306 unsigned int maclen = skb_network_offset(skb); 2307 netoff = TPACKET_ALIGN(po->tp_hdrlen + 2308 (maclen < 16 ? 16 : maclen)) + 2309 po->tp_reserve; 2310 vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); 2311 if (vnet_hdr_sz) 2312 netoff += vnet_hdr_sz; 2313 macoff = netoff - maclen; 2314 } 2315 if (netoff > USHRT_MAX) { 2316 atomic_inc(&po->tp_drops); 2317 goto drop_n_restore; 2318 } 2319 if (po->tp_version <= TPACKET_V2) { 2320 if (macoff + snaplen > po->rx_ring.frame_size) { 2321 if (READ_ONCE(po->copy_thresh) && 2322 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 2323 if (skb_shared(skb)) { 2324 copy_skb = skb_clone(skb, GFP_ATOMIC); 2325 } else { 2326 copy_skb = skb_get(skb); 2327 skb_head = skb->data; 2328 } 2329 if (copy_skb) { 2330 memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0, 2331 sizeof(PACKET_SKB_CB(copy_skb)->sa.ll)); 2332 skb_set_owner_r(copy_skb, sk); 2333 } 2334 } 2335 snaplen = po->rx_ring.frame_size - macoff; 2336 if ((int)snaplen < 0) { 2337 snaplen = 0; 2338 vnet_hdr_sz = 0; 2339 } 2340 } 2341 } else if (unlikely(macoff + snaplen > 2342 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { 2343 u32 nval; 2344 2345 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; 2346 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. 
macoff=%u\n", 2347 snaplen, nval, macoff); 2348 snaplen = nval; 2349 if (unlikely((int)snaplen < 0)) { 2350 snaplen = 0; 2351 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; 2352 vnet_hdr_sz = 0; 2353 } 2354 } 2355 spin_lock(&sk->sk_receive_queue.lock); 2356 h.raw = packet_current_rx_frame(po, skb, 2357 TP_STATUS_KERNEL, (macoff+snaplen)); 2358 if (!h.raw) 2359 goto drop_n_account; 2360 2361 if (po->tp_version <= TPACKET_V2) { 2362 slot_id = po->rx_ring.head; 2363 if (test_bit(slot_id, po->rx_ring.rx_owner_map)) 2364 goto drop_n_account; 2365 __set_bit(slot_id, po->rx_ring.rx_owner_map); 2366 } 2367 2368 if (vnet_hdr_sz && 2369 virtio_net_hdr_from_skb(skb, h.raw + macoff - 2370 sizeof(struct virtio_net_hdr), 2371 vio_le(), true, 0)) { 2372 if (po->tp_version == TPACKET_V3) 2373 prb_clear_blk_fill_status(&po->rx_ring); 2374 goto drop_n_account; 2375 } 2376 2377 if (po->tp_version <= TPACKET_V2) { 2378 packet_increment_rx_head(po, &po->rx_ring); 2379 /* 2380 * LOSING will be reported till you read the stats, 2381 * because it's COR - Clear On Read. 2382 * Anyways, moving it for V1/V2 only as V3 doesn't need this 2383 * at packet level. 2384 */ 2385 if (atomic_read(&po->tp_drops)) 2386 status |= TP_STATUS_LOSING; 2387 } 2388 2389 po->stats.stats1.tp_packets++; 2390 if (copy_skb) { 2391 status |= TP_STATUS_COPY; 2392 skb_clear_delivery_time(copy_skb); 2393 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 2394 } 2395 spin_unlock(&sk->sk_receive_queue.lock); 2396 2397 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 2398 2399 /* Always timestamp; prefer an existing software timestamp taken 2400 * closer to the time of capture. 2401 */ 2402 ts_status = tpacket_get_timestamp(skb, &ts, 2403 READ_ONCE(po->tp_tstamp) | 2404 SOF_TIMESTAMPING_SOFTWARE); 2405 if (!ts_status) 2406 ktime_get_real_ts64(&ts); 2407 2408 status |= ts_status; 2409 2410 switch (po->tp_version) { 2411 case TPACKET_V1: 2412 h.h1->tp_len = skb->len; 2413 h.h1->tp_snaplen = snaplen; 2414 h.h1->tp_mac = macoff; 2415 h.h1->tp_net = netoff; 2416 h.h1->tp_sec = ts.tv_sec; 2417 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 2418 hdrlen = sizeof(*h.h1); 2419 break; 2420 case TPACKET_V2: 2421 h.h2->tp_len = skb->len; 2422 h.h2->tp_snaplen = snaplen; 2423 h.h2->tp_mac = macoff; 2424 h.h2->tp_net = netoff; 2425 h.h2->tp_sec = ts.tv_sec; 2426 h.h2->tp_nsec = ts.tv_nsec; 2427 if (skb_vlan_tag_present(skb)) { 2428 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); 2429 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2430 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2431 } else { 2432 h.h2->tp_vlan_tci = 0; 2433 h.h2->tp_vlan_tpid = 0; 2434 } 2435 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); 2436 hdrlen = sizeof(*h.h2); 2437 break; 2438 case TPACKET_V3: 2439 /* tp_nxt_offset,vlan are already populated above. 
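 * (prb_fill_curr_block() populated them when the slot was claimed).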
2440 * So DONT clear those fields here 2441 */ 2442 h.h3->tp_status |= status; 2443 h.h3->tp_len = skb->len; 2444 h.h3->tp_snaplen = snaplen; 2445 h.h3->tp_mac = macoff; 2446 h.h3->tp_net = netoff; 2447 h.h3->tp_sec = ts.tv_sec; 2448 h.h3->tp_nsec = ts.tv_nsec; 2449 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); 2450 hdrlen = sizeof(*h.h3); 2451 break; 2452 default: 2453 BUG(); 2454 } 2455 2456 sll = h.raw + TPACKET_ALIGN(hdrlen); 2457 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2458 sll->sll_family = AF_PACKET; 2459 sll->sll_hatype = dev->type; 2460 sll->sll_protocol = skb->protocol; 2461 sll->sll_pkttype = skb->pkt_type; 2462 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) 2463 sll->sll_ifindex = orig_dev->ifindex; 2464 else 2465 sll->sll_ifindex = dev->ifindex; 2466 2467 smp_mb(); 2468 2469 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 2470 if (po->tp_version <= TPACKET_V2) { 2471 u8 *start, *end; 2472 2473 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + 2474 macoff + snaplen); 2475 2476 for (start = h.raw; start < end; start += PAGE_SIZE) 2477 flush_dcache_page(pgv_to_page(start)); 2478 } 2479 smp_wmb(); 2480 #endif 2481 2482 if (po->tp_version <= TPACKET_V2) { 2483 spin_lock(&sk->sk_receive_queue.lock); 2484 __packet_set_status(po, h.raw, status); 2485 __clear_bit(slot_id, po->rx_ring.rx_owner_map); 2486 spin_unlock(&sk->sk_receive_queue.lock); 2487 sk->sk_data_ready(sk); 2488 } else if (po->tp_version == TPACKET_V3) { 2489 prb_clear_blk_fill_status(&po->rx_ring); 2490 } 2491 2492 drop_n_restore: 2493 if (skb_head != skb->data && skb_shared(skb)) { 2494 skb->data = skb_head; 2495 skb->len = skb_len; 2496 } 2497 drop: 2498 kfree_skb_reason(skb, drop_reason); 2499 return 0; 2500 2501 drop_n_account: 2502 spin_unlock(&sk->sk_receive_queue.lock); 2503 atomic_inc(&po->tp_drops); 2504 drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; 2505 2506 sk->sk_data_ready(sk); 2507 kfree_skb_reason(copy_skb, drop_reason); 2508 goto drop_n_restore; 2509 } 2510 2511 static void tpacket_destruct_skb(struct sk_buff *skb) 2512 { 2513 struct packet_sock *po = pkt_sk(skb->sk); 2514 2515 if (likely(po->tx_ring.pg_vec)) { 2516 void *ph; 2517 __u32 ts; 2518 2519 ph = skb_zcopy_get_nouarg(skb); 2520 packet_dec_pending(&po->tx_ring); 2521 2522 ts = __packet_set_timestamp(po, ph, skb); 2523 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2524 2525 if (!packet_read_pending(&po->tx_ring)) 2526 complete(&po->skb_completion); 2527 } 2528 2529 sock_wfree(skb); 2530 } 2531 2532 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) 2533 { 2534 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2535 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2536 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > 2537 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) 2538 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), 2539 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2540 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); 2541 2542 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) 2543 return -EINVAL; 2544 2545 return 0; 2546 } 2547 2548 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, 2549 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz) 2550 { 2551 int ret; 2552 2553 if (*len < vnet_hdr_sz) 2554 return -EINVAL; 2555 *len -= vnet_hdr_sz; 2556 2557 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) 2558 return -EFAULT; 2559 2560 ret = __packet_snd_vnet_parse(vnet_hdr, *len); 2561 if (ret) 2562 return ret; 
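	/* Only sizeof(struct virtio_net_hdr) was copied above; a larger
	 * configured header (the mrg_rxbuf layout) leaves its tail in
	 * the iterator.
	 */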
2563 2564 /* move iter to point to the start of mac header */ 2565 if (vnet_hdr_sz != sizeof(struct virtio_net_hdr)) 2566 iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr)); 2567 2568 return 0; 2569 } 2570 2571 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2572 void *frame, struct net_device *dev, void *data, int tp_len, 2573 __be16 proto, unsigned char *addr, int hlen, int copylen, 2574 const struct sockcm_cookie *sockc) 2575 { 2576 union tpacket_uhdr ph; 2577 int to_write, offset, len, nr_frags, len_max; 2578 struct socket *sock = po->sk.sk_socket; 2579 struct page *page; 2580 int err; 2581 2582 ph.raw = frame; 2583 2584 skb->protocol = proto; 2585 skb->dev = dev; 2586 skb->priority = READ_ONCE(po->sk.sk_priority); 2587 skb->mark = READ_ONCE(po->sk.sk_mark); 2588 skb->tstamp = sockc->transmit_time; 2589 skb->mono_delivery_time = !!skb->tstamp; 2590 skb_setup_tx_timestamp(skb, sockc->tsflags); 2591 skb_zcopy_set_nouarg(skb, ph.raw); 2592 2593 skb_reserve(skb, hlen); 2594 skb_reset_network_header(skb); 2595 2596 to_write = tp_len; 2597 2598 if (sock->type == SOCK_DGRAM) { 2599 err = dev_hard_header(skb, dev, ntohs(proto), addr, 2600 NULL, tp_len); 2601 if (unlikely(err < 0)) 2602 return -EINVAL; 2603 } else if (copylen) { 2604 int hdrlen = min_t(int, copylen, tp_len); 2605 2606 skb_push(skb, dev->hard_header_len); 2607 skb_put(skb, copylen - dev->hard_header_len); 2608 err = skb_store_bits(skb, 0, data, hdrlen); 2609 if (unlikely(err)) 2610 return err; 2611 if (!dev_validate_header(dev, skb->data, hdrlen)) 2612 return -EINVAL; 2613 2614 data += hdrlen; 2615 to_write -= hdrlen; 2616 } 2617 2618 offset = offset_in_page(data); 2619 len_max = PAGE_SIZE - offset; 2620 len = ((to_write > len_max) ? len_max : to_write); 2621 2622 skb->data_len = to_write; 2623 skb->len += to_write; 2624 skb->truesize += to_write; 2625 refcount_add(to_write, &po->sk.sk_wmem_alloc); 2626 2627 while (likely(to_write)) { 2628 nr_frags = skb_shinfo(skb)->nr_frags; 2629 2630 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2631 pr_err("Packet exceed the number of skb frags(%u)\n", 2632 (unsigned int)MAX_SKB_FRAGS); 2633 return -EFAULT; 2634 } 2635 2636 page = pgv_to_page(data); 2637 data += len; 2638 flush_dcache_page(page); 2639 get_page(page); 2640 skb_fill_page_desc(skb, nr_frags, page, offset, len); 2641 to_write -= len; 2642 offset = 0; 2643 len_max = PAGE_SIZE; 2644 len = ((to_write > len_max) ? 
len_max : to_write); 2645 } 2646 2647 packet_parse_headers(skb, sock); 2648 2649 return tp_len; 2650 } 2651 2652 static int tpacket_parse_header(struct packet_sock *po, void *frame, 2653 int size_max, void **data) 2654 { 2655 union tpacket_uhdr ph; 2656 int tp_len, off; 2657 2658 ph.raw = frame; 2659 2660 switch (po->tp_version) { 2661 case TPACKET_V3: 2662 if (ph.h3->tp_next_offset != 0) { 2663 pr_warn_once("variable sized slot not supported"); 2664 return -EINVAL; 2665 } 2666 tp_len = ph.h3->tp_len; 2667 break; 2668 case TPACKET_V2: 2669 tp_len = ph.h2->tp_len; 2670 break; 2671 default: 2672 tp_len = ph.h1->tp_len; 2673 break; 2674 } 2675 if (unlikely(tp_len > size_max)) { 2676 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); 2677 return -EMSGSIZE; 2678 } 2679 2680 if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) { 2681 int off_min, off_max; 2682 2683 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2684 off_max = po->tx_ring.frame_size - tp_len; 2685 if (po->sk.sk_type == SOCK_DGRAM) { 2686 switch (po->tp_version) { 2687 case TPACKET_V3: 2688 off = ph.h3->tp_net; 2689 break; 2690 case TPACKET_V2: 2691 off = ph.h2->tp_net; 2692 break; 2693 default: 2694 off = ph.h1->tp_net; 2695 break; 2696 } 2697 } else { 2698 switch (po->tp_version) { 2699 case TPACKET_V3: 2700 off = ph.h3->tp_mac; 2701 break; 2702 case TPACKET_V2: 2703 off = ph.h2->tp_mac; 2704 break; 2705 default: 2706 off = ph.h1->tp_mac; 2707 break; 2708 } 2709 } 2710 if (unlikely((off < off_min) || (off_max < off))) 2711 return -EINVAL; 2712 } else { 2713 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2714 } 2715 2716 *data = frame + off; 2717 return tp_len; 2718 } 2719 2720 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2721 { 2722 struct sk_buff *skb = NULL; 2723 struct net_device *dev; 2724 struct virtio_net_hdr *vnet_hdr = NULL; 2725 struct sockcm_cookie sockc; 2726 __be16 proto; 2727 int err, reserve = 0; 2728 void *ph; 2729 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2730 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2731 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); 2732 unsigned char *addr = NULL; 2733 int tp_len, size_max; 2734 void *data; 2735 int len_sum = 0; 2736 int status = TP_STATUS_AVAILABLE; 2737 int hlen, tlen, copylen = 0; 2738 long timeo = 0; 2739 2740 mutex_lock(&po->pg_vec_lock); 2741 2742 /* packet_sendmsg() check on tx_ring.pg_vec was lockless, 2743 * we need to confirm it under protection of pg_vec_lock. 
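 * packet_set_ring() may have torn the ring down in between.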
2744 */ 2745 if (unlikely(!po->tx_ring.pg_vec)) { 2746 err = -EBUSY; 2747 goto out; 2748 } 2749 if (likely(saddr == NULL)) { 2750 dev = packet_cached_dev_get(po); 2751 proto = READ_ONCE(po->num); 2752 } else { 2753 err = -EINVAL; 2754 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2755 goto out; 2756 if (msg->msg_namelen < (saddr->sll_halen 2757 + offsetof(struct sockaddr_ll, 2758 sll_addr))) 2759 goto out; 2760 proto = saddr->sll_protocol; 2761 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2762 if (po->sk.sk_socket->type == SOCK_DGRAM) { 2763 if (dev && msg->msg_namelen < dev->addr_len + 2764 offsetof(struct sockaddr_ll, sll_addr)) 2765 goto out_put; 2766 addr = saddr->sll_addr; 2767 } 2768 } 2769 2770 err = -ENXIO; 2771 if (unlikely(dev == NULL)) 2772 goto out; 2773 err = -ENETDOWN; 2774 if (unlikely(!(dev->flags & IFF_UP))) 2775 goto out_put; 2776 2777 sockcm_init(&sockc, &po->sk); 2778 if (msg->msg_controllen) { 2779 err = sock_cmsg_send(&po->sk, msg, &sockc); 2780 if (unlikely(err)) 2781 goto out_put; 2782 } 2783 2784 if (po->sk.sk_socket->type == SOCK_RAW) 2785 reserve = dev->hard_header_len; 2786 size_max = po->tx_ring.frame_size 2787 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2788 2789 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz) 2790 size_max = dev->mtu + reserve + VLAN_HLEN; 2791 2792 reinit_completion(&po->skb_completion); 2793 2794 do { 2795 ph = packet_current_frame(po, &po->tx_ring, 2796 TP_STATUS_SEND_REQUEST); 2797 if (unlikely(ph == NULL)) { 2798 if (need_wait && skb) { 2799 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT); 2800 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo); 2801 if (timeo <= 0) { 2802 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS; 2803 goto out_put; 2804 } 2805 } 2806 /* check for additional frames */ 2807 continue; 2808 } 2809 2810 skb = NULL; 2811 tp_len = tpacket_parse_header(po, ph, size_max, &data); 2812 if (tp_len < 0) 2813 goto tpacket_error; 2814 2815 status = TP_STATUS_SEND_REQUEST; 2816 hlen = LL_RESERVED_SPACE(dev); 2817 tlen = dev->needed_tailroom; 2818 if (vnet_hdr_sz) { 2819 vnet_hdr = data; 2820 data += vnet_hdr_sz; 2821 tp_len -= vnet_hdr_sz; 2822 if (tp_len < 0 || 2823 __packet_snd_vnet_parse(vnet_hdr, tp_len)) { 2824 tp_len = -EINVAL; 2825 goto tpacket_error; 2826 } 2827 copylen = __virtio16_to_cpu(vio_le(), 2828 vnet_hdr->hdr_len); 2829 } 2830 copylen = max_t(int, copylen, dev->hard_header_len); 2831 skb = sock_alloc_send_skb(&po->sk, 2832 hlen + tlen + sizeof(struct sockaddr_ll) + 2833 (copylen - dev->hard_header_len), 2834 !need_wait, &err); 2835 2836 if (unlikely(skb == NULL)) { 2837 /* we assume the socket was initially writeable ... 
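 * so a NULL skb mid-run means we ran out of memory; report the
 * bytes already queued (len_sum) instead of the error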
*/ 2838 if (likely(len_sum > 0)) 2839 err = len_sum; 2840 goto out_status; 2841 } 2842 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, 2843 addr, hlen, copylen, &sockc); 2844 if (likely(tp_len >= 0) && 2845 tp_len > dev->mtu + reserve && 2846 !vnet_hdr_sz && 2847 !packet_extra_vlan_len_allowed(dev, skb)) 2848 tp_len = -EMSGSIZE; 2849 2850 if (unlikely(tp_len < 0)) { 2851 tpacket_error: 2852 if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) { 2853 __packet_set_status(po, ph, 2854 TP_STATUS_AVAILABLE); 2855 packet_increment_head(&po->tx_ring); 2856 kfree_skb(skb); 2857 continue; 2858 } else { 2859 status = TP_STATUS_WRONG_FORMAT; 2860 err = tp_len; 2861 goto out_status; 2862 } 2863 } 2864 2865 if (vnet_hdr_sz) { 2866 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { 2867 tp_len = -EINVAL; 2868 goto tpacket_error; 2869 } 2870 virtio_net_hdr_set_proto(skb, vnet_hdr); 2871 } 2872 2873 skb->destructor = tpacket_destruct_skb; 2874 __packet_set_status(po, ph, TP_STATUS_SENDING); 2875 packet_inc_pending(&po->tx_ring); 2876 2877 status = TP_STATUS_SEND_REQUEST; 2878 err = packet_xmit(po, skb); 2879 if (unlikely(err != 0)) { 2880 if (err > 0) 2881 err = net_xmit_errno(err); 2882 if (err && __packet_get_status(po, ph) == 2883 TP_STATUS_AVAILABLE) { 2884 /* skb was destructed already */ 2885 skb = NULL; 2886 goto out_status; 2887 } 2888 /* 2889 * skb was dropped but not destructed yet; 2890 * let's treat it like congestion or err < 0 2891 */ 2892 err = 0; 2893 } 2894 packet_increment_head(&po->tx_ring); 2895 len_sum += tp_len; 2896 } while (likely((ph != NULL) || 2897 /* Note: packet_read_pending() might be slow if we have 2898 * to call it as it's per_cpu variable, but in fast-path 2899 * we already short-circuit the loop with the first 2900 * condition, and luckily don't have to go that path 2901 * anyway. 2902 */ 2903 (need_wait && packet_read_pending(&po->tx_ring)))); 2904 2905 err = len_sum; 2906 goto out_put; 2907 2908 out_status: 2909 __packet_set_status(po, ph, status); 2910 kfree_skb(skb); 2911 out_put: 2912 dev_put(dev); 2913 out: 2914 mutex_unlock(&po->pg_vec_lock); 2915 return err; 2916 } 2917 2918 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2919 size_t reserve, size_t len, 2920 size_t linear, int noblock, 2921 int *err) 2922 { 2923 struct sk_buff *skb; 2924 2925 /* Under a page? Don't bother with paged skb. */ 2926 if (prepad + len < PAGE_SIZE || !linear) 2927 linear = len; 2928 2929 if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) 2930 linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); 2931 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 2932 err, PAGE_ALLOC_COSTLY_ORDER); 2933 if (!skb) 2934 return NULL; 2935 2936 skb_reserve(skb, reserve); 2937 skb_put(skb, linear); 2938 skb->data_len = len - linear; 2939 skb->len += len - linear; 2940 2941 return skb; 2942 } 2943 2944 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) 2945 { 2946 struct sock *sk = sock->sk; 2947 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2948 struct sk_buff *skb; 2949 struct net_device *dev; 2950 __be16 proto; 2951 unsigned char *addr = NULL; 2952 int err, reserve = 0; 2953 struct sockcm_cookie sockc; 2954 struct virtio_net_hdr vnet_hdr = { 0 }; 2955 int offset = 0; 2956 struct packet_sock *po = pkt_sk(sk); 2957 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); 2958 int hlen, tlen, linear; 2959 int extra_len = 0; 2960 2961 /* 2962 * Get and verify the address. 
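 * With no msg_name the packet goes out over the bound device
 * and protocol (packet_cached_dev_get()/po->num).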
2963 */ 2964 2965 if (likely(saddr == NULL)) { 2966 dev = packet_cached_dev_get(po); 2967 proto = READ_ONCE(po->num); 2968 } else { 2969 err = -EINVAL; 2970 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2971 goto out; 2972 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2973 goto out; 2974 proto = saddr->sll_protocol; 2975 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2976 if (sock->type == SOCK_DGRAM) { 2977 if (dev && msg->msg_namelen < dev->addr_len + 2978 offsetof(struct sockaddr_ll, sll_addr)) 2979 goto out_unlock; 2980 addr = saddr->sll_addr; 2981 } 2982 } 2983 2984 err = -ENXIO; 2985 if (unlikely(dev == NULL)) 2986 goto out_unlock; 2987 err = -ENETDOWN; 2988 if (unlikely(!(dev->flags & IFF_UP))) 2989 goto out_unlock; 2990 2991 sockcm_init(&sockc, sk); 2992 sockc.mark = READ_ONCE(sk->sk_mark); 2993 if (msg->msg_controllen) { 2994 err = sock_cmsg_send(sk, msg, &sockc); 2995 if (unlikely(err)) 2996 goto out_unlock; 2997 } 2998 2999 if (sock->type == SOCK_RAW) 3000 reserve = dev->hard_header_len; 3001 if (vnet_hdr_sz) { 3002 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz); 3003 if (err) 3004 goto out_unlock; 3005 } 3006 3007 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 3008 if (!netif_supports_nofcs(dev)) { 3009 err = -EPROTONOSUPPORT; 3010 goto out_unlock; 3011 } 3012 extra_len = 4; /* We're doing our own CRC */ 3013 } 3014 3015 err = -EMSGSIZE; 3016 if (!vnet_hdr.gso_type && 3017 (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) 3018 goto out_unlock; 3019 3020 err = -ENOBUFS; 3021 hlen = LL_RESERVED_SPACE(dev); 3022 tlen = dev->needed_tailroom; 3023 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 3024 linear = max(linear, min_t(int, len, dev->hard_header_len)); 3025 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, 3026 msg->msg_flags & MSG_DONTWAIT, &err); 3027 if (skb == NULL) 3028 goto out_unlock; 3029 3030 skb_reset_network_header(skb); 3031 3032 err = -EINVAL; 3033 if (sock->type == SOCK_DGRAM) { 3034 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 3035 if (unlikely(offset < 0)) 3036 goto out_free; 3037 } else if (reserve) { 3038 skb_reserve(skb, -reserve); 3039 if (len < reserve + sizeof(struct ipv6hdr) && 3040 dev->min_header_len != dev->hard_header_len) 3041 skb_reset_network_header(skb); 3042 } 3043 3044 /* Returns -EFAULT on error */ 3045 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); 3046 if (err) 3047 goto out_free; 3048 3049 if ((sock->type == SOCK_RAW && 3050 !dev_validate_header(dev, skb->data, len)) || !skb->len) { 3051 err = -EINVAL; 3052 goto out_free; 3053 } 3054 3055 skb_setup_tx_timestamp(skb, sockc.tsflags); 3056 3057 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && 3058 !packet_extra_vlan_len_allowed(dev, skb)) { 3059 err = -EMSGSIZE; 3060 goto out_free; 3061 } 3062 3063 skb->protocol = proto; 3064 skb->dev = dev; 3065 skb->priority = READ_ONCE(sk->sk_priority); 3066 skb->mark = sockc.mark; 3067 skb->tstamp = sockc.transmit_time; 3068 skb->mono_delivery_time = !!skb->tstamp; 3069 3070 if (unlikely(extra_len == 4)) 3071 skb->no_fcs = 1; 3072 3073 packet_parse_headers(skb, sock); 3074 3075 if (vnet_hdr_sz) { 3076 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); 3077 if (err) 3078 goto out_free; 3079 len += vnet_hdr_sz; 3080 virtio_net_hdr_set_proto(skb, &vnet_hdr); 3081 } 3082 3083 err = packet_xmit(po, skb); 3084 3085 if (unlikely(err != 0)) { 3086 if (err > 0) 3087 err = net_xmit_errno(err); 3088 if (err) 3089 
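		/* packet_xmit() consumed the skb even on error, so this
		 * path must not kfree_skb() it again.
		 */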
goto out_unlock; 3090 } 3091 3092 dev_put(dev); 3093 3094 return len; 3095 3096 out_free: 3097 kfree_skb(skb); 3098 out_unlock: 3099 dev_put(dev); 3100 out: 3101 return err; 3102 } 3103 3104 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 3105 { 3106 struct sock *sk = sock->sk; 3107 struct packet_sock *po = pkt_sk(sk); 3108 3109 /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy. 3110 * tpacket_snd() will redo the check safely. 3111 */ 3112 if (data_race(po->tx_ring.pg_vec)) 3113 return tpacket_snd(po, msg); 3114 3115 return packet_snd(sock, msg, len); 3116 } 3117 3118 /* 3119 * Close a PACKET socket. This is fairly simple. We immediately go 3120 * to 'closed' state and remove our protocol entry in the device list. 3121 */ 3122 3123 static int packet_release(struct socket *sock) 3124 { 3125 struct sock *sk = sock->sk; 3126 struct packet_sock *po; 3127 struct packet_fanout *f; 3128 struct net *net; 3129 union tpacket_req_u req_u; 3130 3131 if (!sk) 3132 return 0; 3133 3134 net = sock_net(sk); 3135 po = pkt_sk(sk); 3136 3137 mutex_lock(&net->packet.sklist_lock); 3138 sk_del_node_init_rcu(sk); 3139 mutex_unlock(&net->packet.sklist_lock); 3140 3141 sock_prot_inuse_add(net, sk->sk_prot, -1); 3142 3143 spin_lock(&po->bind_lock); 3144 unregister_prot_hook(sk, false); 3145 packet_cached_dev_reset(po); 3146 3147 if (po->prot_hook.dev) { 3148 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker); 3149 po->prot_hook.dev = NULL; 3150 } 3151 spin_unlock(&po->bind_lock); 3152 3153 packet_flush_mclist(sk); 3154 3155 lock_sock(sk); 3156 if (po->rx_ring.pg_vec) { 3157 memset(&req_u, 0, sizeof(req_u)); 3158 packet_set_ring(sk, &req_u, 1, 0); 3159 } 3160 3161 if (po->tx_ring.pg_vec) { 3162 memset(&req_u, 0, sizeof(req_u)); 3163 packet_set_ring(sk, &req_u, 1, 1); 3164 } 3165 release_sock(sk); 3166 3167 f = fanout_release(sk); 3168 3169 synchronize_net(); 3170 3171 kfree(po->rollover); 3172 if (f) { 3173 fanout_release_data(f); 3174 kvfree(f); 3175 } 3176 /* 3177 * Now the socket is dead. No more input will appear. 3178 */ 3179 sock_orphan(sk); 3180 sock->sk = NULL; 3181 3182 /* Purge queues */ 3183 3184 skb_queue_purge(&sk->sk_receive_queue); 3185 packet_free_pending(po); 3186 3187 sock_put(sk); 3188 return 0; 3189 } 3190 3191 /* 3192 * Attach a packet hook. 
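 * Binds the socket to (proto, dev); rebinding tears down the old
 * hook under bind_lock before the new one is registered.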
3193 */ 3194 3195 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, 3196 __be16 proto) 3197 { 3198 struct packet_sock *po = pkt_sk(sk); 3199 struct net_device *dev = NULL; 3200 bool unlisted = false; 3201 bool need_rehook; 3202 int ret = 0; 3203 3204 lock_sock(sk); 3205 spin_lock(&po->bind_lock); 3206 if (!proto) 3207 proto = po->num; 3208 3209 rcu_read_lock(); 3210 3211 if (po->fanout) { 3212 ret = -EINVAL; 3213 goto out_unlock; 3214 } 3215 3216 if (name) { 3217 dev = dev_get_by_name_rcu(sock_net(sk), name); 3218 if (!dev) { 3219 ret = -ENODEV; 3220 goto out_unlock; 3221 } 3222 } else if (ifindex) { 3223 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3224 if (!dev) { 3225 ret = -ENODEV; 3226 goto out_unlock; 3227 } 3228 } 3229 3230 need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev; 3231 3232 if (need_rehook) { 3233 dev_hold(dev); 3234 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) { 3235 rcu_read_unlock(); 3236 /* prevents packet_notifier() from calling 3237 * register_prot_hook() 3238 */ 3239 WRITE_ONCE(po->num, 0); 3240 __unregister_prot_hook(sk, true); 3241 rcu_read_lock(); 3242 if (dev) 3243 unlisted = !dev_get_by_index_rcu(sock_net(sk), 3244 dev->ifindex); 3245 } 3246 3247 BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING)); 3248 WRITE_ONCE(po->num, proto); 3249 po->prot_hook.type = proto; 3250 3251 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker); 3252 3253 if (unlikely(unlisted)) { 3254 po->prot_hook.dev = NULL; 3255 WRITE_ONCE(po->ifindex, -1); 3256 packet_cached_dev_reset(po); 3257 } else { 3258 netdev_hold(dev, &po->prot_hook.dev_tracker, 3259 GFP_ATOMIC); 3260 po->prot_hook.dev = dev; 3261 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); 3262 packet_cached_dev_assign(po, dev); 3263 } 3264 dev_put(dev); 3265 } 3266 3267 if (proto == 0 || !need_rehook) 3268 goto out_unlock; 3269 3270 if (!unlisted && (!dev || (dev->flags & IFF_UP))) { 3271 register_prot_hook(sk); 3272 } else { 3273 sk->sk_err = ENETDOWN; 3274 if (!sock_flag(sk, SOCK_DEAD)) 3275 sk_error_report(sk); 3276 } 3277 3278 out_unlock: 3279 rcu_read_unlock(); 3280 spin_unlock(&po->bind_lock); 3281 release_sock(sk); 3282 return ret; 3283 } 3284 3285 /* 3286 * Bind a packet socket to a device 3287 */ 3288 3289 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, 3290 int addr_len) 3291 { 3292 struct sock *sk = sock->sk; 3293 char name[sizeof(uaddr->sa_data_min) + 1]; 3294 3295 /* 3296 * Check legality 3297 */ 3298 3299 if (addr_len != sizeof(struct sockaddr)) 3300 return -EINVAL; 3301 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be 3302 * zero-terminated. 3303 */ 3304 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min)); 3305 name[sizeof(uaddr->sa_data_min)] = 0; 3306 3307 return packet_do_bind(sk, name, 0, 0); 3308 } 3309 3310 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 3311 { 3312 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 3313 struct sock *sk = sock->sk; 3314 3315 /* 3316 * Check legality 3317 */ 3318 3319 if (addr_len < sizeof(struct sockaddr_ll)) 3320 return -EINVAL; 3321 if (sll->sll_family != AF_PACKET) 3322 return -EINVAL; 3323 3324 return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol); 3325 } 3326 3327 static struct proto packet_proto = { 3328 .name = "PACKET", 3329 .owner = THIS_MODULE, 3330 .obj_size = sizeof(struct packet_sock), 3331 }; 3332 3333 /* 3334 * Create a packet of type SOCK_PACKET. 
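 * (Despite the name this creates SOCK_RAW and SOCK_DGRAM AF_PACKET
 * sockets too; only the proto_ops and the rcv hook differ.)
 * Typical userspace: socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)).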
3335 */ 3336 3337 static int packet_create(struct net *net, struct socket *sock, int protocol, 3338 int kern) 3339 { 3340 struct sock *sk; 3341 struct packet_sock *po; 3342 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 3343 int err; 3344 3345 if (!ns_capable(net->user_ns, CAP_NET_RAW)) 3346 return -EPERM; 3347 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 3348 sock->type != SOCK_PACKET) 3349 return -ESOCKTNOSUPPORT; 3350 3351 sock->state = SS_UNCONNECTED; 3352 3353 err = -ENOBUFS; 3354 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); 3355 if (sk == NULL) 3356 goto out; 3357 3358 sock->ops = &packet_ops; 3359 if (sock->type == SOCK_PACKET) 3360 sock->ops = &packet_ops_spkt; 3361 3362 sock_init_data(sock, sk); 3363 3364 po = pkt_sk(sk); 3365 init_completion(&po->skb_completion); 3366 sk->sk_family = PF_PACKET; 3367 po->num = proto; 3368 3369 err = packet_alloc_pending(po); 3370 if (err) 3371 goto out2; 3372 3373 packet_cached_dev_reset(po); 3374 3375 sk->sk_destruct = packet_sock_destruct; 3376 3377 /* 3378 * Attach a protocol block 3379 */ 3380 3381 spin_lock_init(&po->bind_lock); 3382 mutex_init(&po->pg_vec_lock); 3383 po->rollover = NULL; 3384 po->prot_hook.func = packet_rcv; 3385 3386 if (sock->type == SOCK_PACKET) 3387 po->prot_hook.func = packet_rcv_spkt; 3388 3389 po->prot_hook.af_packet_priv = sk; 3390 po->prot_hook.af_packet_net = sock_net(sk); 3391 3392 if (proto) { 3393 po->prot_hook.type = proto; 3394 __register_prot_hook(sk); 3395 } 3396 3397 mutex_lock(&net->packet.sklist_lock); 3398 sk_add_node_tail_rcu(sk, &net->packet.sklist); 3399 mutex_unlock(&net->packet.sklist_lock); 3400 3401 sock_prot_inuse_add(net, &packet_proto, 1); 3402 3403 return 0; 3404 out2: 3405 sk_free(sk); 3406 out: 3407 return err; 3408 } 3409 3410 /* 3411 * Pull a packet from our receive queue and hand it to the user. 3412 * If necessary we block. 3413 */ 3414 3415 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 3416 int flags) 3417 { 3418 struct sock *sk = sock->sk; 3419 struct sk_buff *skb; 3420 int copied, err; 3421 int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz); 3422 unsigned int origlen = 0; 3423 3424 err = -EINVAL; 3425 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) 3426 goto out; 3427 3428 #if 0 3429 /* What error should we return now? EUNATTACH? */ 3430 if (pkt_sk(sk)->ifindex < 0) 3431 return -ENODEV; 3432 #endif 3433 3434 if (flags & MSG_ERRQUEUE) { 3435 err = sock_recv_errqueue(sk, msg, len, 3436 SOL_PACKET, PACKET_TX_TIMESTAMP); 3437 goto out; 3438 } 3439 3440 /* 3441 * Call the generic datagram receiver. This handles all sorts 3442 * of horrible races and re-entrancy so we can forget about it 3443 * in the protocol layers. 3444 * 3445 * Now it will return ENETDOWN, if device have just gone down, 3446 * but then it will block. 3447 */ 3448 3449 skb = skb_recv_datagram(sk, flags, &err); 3450 3451 /* 3452 * An error occurred so return it. Because skb_recv_datagram() 3453 * handles the blocking we don't see and worry about blocking 3454 * retries. 3455 */ 3456 3457 if (skb == NULL) 3458 goto out; 3459 3460 packet_rcv_try_clear_pressure(pkt_sk(sk)); 3461 3462 if (vnet_hdr_len) { 3463 err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len); 3464 if (err) 3465 goto out_free; 3466 } 3467 3468 /* You lose any data beyond the buffer you gave. If it worries 3469 * a user program they can ask the device for its MTU 3470 * anyway. 
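 * MSG_TRUNC is set below when the copy is cut short, and callers
 * passing MSG_TRUNC get the original frame length back.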
3471 */ 3472 copied = skb->len; 3473 if (copied > len) { 3474 copied = len; 3475 msg->msg_flags |= MSG_TRUNC; 3476 } 3477 3478 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3479 if (err) 3480 goto out_free; 3481 3482 if (sock->type != SOCK_PACKET) { 3483 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3484 3485 /* Original length was stored in sockaddr_ll fields */ 3486 origlen = PACKET_SKB_CB(skb)->sa.origlen; 3487 sll->sll_family = AF_PACKET; 3488 sll->sll_protocol = skb->protocol; 3489 } 3490 3491 sock_recv_cmsgs(msg, sk, skb); 3492 3493 if (msg->msg_name) { 3494 const size_t max_len = min(sizeof(skb->cb), 3495 sizeof(struct sockaddr_storage)); 3496 int copy_len; 3497 3498 /* If the address length field is there to be filled 3499 * in, we fill it in now. 3500 */ 3501 if (sock->type == SOCK_PACKET) { 3502 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 3503 msg->msg_namelen = sizeof(struct sockaddr_pkt); 3504 copy_len = msg->msg_namelen; 3505 } else { 3506 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3507 3508 msg->msg_namelen = sll->sll_halen + 3509 offsetof(struct sockaddr_ll, sll_addr); 3510 copy_len = msg->msg_namelen; 3511 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) { 3512 memset(msg->msg_name + 3513 offsetof(struct sockaddr_ll, sll_addr), 3514 0, sizeof(sll->sll_addr)); 3515 msg->msg_namelen = sizeof(struct sockaddr_ll); 3516 } 3517 } 3518 if (WARN_ON_ONCE(copy_len > max_len)) { 3519 copy_len = max_len; 3520 msg->msg_namelen = copy_len; 3521 } 3522 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); 3523 } 3524 3525 if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) { 3526 struct tpacket_auxdata aux; 3527 3528 aux.tp_status = TP_STATUS_USER; 3529 if (skb->ip_summed == CHECKSUM_PARTIAL) 3530 aux.tp_status |= TP_STATUS_CSUMNOTREADY; 3531 else if (skb->pkt_type != PACKET_OUTGOING && 3532 skb_csum_unnecessary(skb)) 3533 aux.tp_status |= TP_STATUS_CSUM_VALID; 3534 if (skb_is_gso(skb) && skb_is_gso_tcp(skb)) 3535 aux.tp_status |= TP_STATUS_GSO_TCP; 3536 3537 aux.tp_len = origlen; 3538 aux.tp_snaplen = skb->len; 3539 aux.tp_mac = 0; 3540 aux.tp_net = skb_network_offset(skb); 3541 if (skb_vlan_tag_present(skb)) { 3542 aux.tp_vlan_tci = skb_vlan_tag_get(skb); 3543 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3544 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3545 } else { 3546 aux.tp_vlan_tci = 0; 3547 aux.tp_vlan_tpid = 0; 3548 } 3549 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 3550 } 3551 3552 /* 3553 * Free or return the buffer as appropriate. Again this 3554 * hides all the races and re-entrancy issues from us. 3555 */ 3556 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); 3557 3558 out_free: 3559 skb_free_datagram(sk, skb); 3560 out: 3561 return err; 3562 } 3563 3564 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, 3565 int peer) 3566 { 3567 struct net_device *dev; 3568 struct sock *sk = sock->sk; 3569 3570 if (peer) 3571 return -EOPNOTSUPP; 3572 3573 uaddr->sa_family = AF_PACKET; 3574 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min)); 3575 rcu_read_lock(); 3576 dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); 3577 if (dev) 3578 strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min)); 3579 rcu_read_unlock(); 3580 3581 return sizeof(*uaddr); 3582 } 3583 3584 static int packet_getname(struct socket *sock, struct sockaddr *uaddr, 3585 int peer) 3586 { 3587 struct net_device *dev; 3588 struct sock *sk = sock->sk; 3589 struct packet_sock *po = pkt_sk(sk); 3590 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); 3591 int ifindex; 3592 3593 if (peer) 3594 return -EOPNOTSUPP; 3595 3596 ifindex = READ_ONCE(po->ifindex); 3597 sll->sll_family = AF_PACKET; 3598 sll->sll_ifindex = ifindex; 3599 sll->sll_protocol = READ_ONCE(po->num); 3600 sll->sll_pkttype = 0; 3601 rcu_read_lock(); 3602 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3603 if (dev) { 3604 sll->sll_hatype = dev->type; 3605 sll->sll_halen = dev->addr_len; 3606 3607 /* Let __fortify_memcpy_chk() know the actual buffer size. */ 3608 memcpy(((struct sockaddr_storage *)sll)->__data + 3609 offsetof(struct sockaddr_ll, sll_addr) - 3610 offsetofend(struct sockaddr_ll, sll_family), 3611 dev->dev_addr, dev->addr_len); 3612 } else { 3613 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3614 sll->sll_halen = 0; 3615 } 3616 rcu_read_unlock(); 3617 3618 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 3619 } 3620 3621 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, 3622 int what) 3623 { 3624 switch (i->type) { 3625 case PACKET_MR_MULTICAST: 3626 if (i->alen != dev->addr_len) 3627 return -EINVAL; 3628 if (what > 0) 3629 return dev_mc_add(dev, i->addr); 3630 else 3631 return dev_mc_del(dev, i->addr); 3632 break; 3633 case PACKET_MR_PROMISC: 3634 return dev_set_promiscuity(dev, what); 3635 case PACKET_MR_ALLMULTI: 3636 return dev_set_allmulti(dev, what); 3637 case PACKET_MR_UNICAST: 3638 if (i->alen != dev->addr_len) 3639 return -EINVAL; 3640 if (what > 0) 3641 return dev_uc_add(dev, i->addr); 3642 else 3643 return dev_uc_del(dev, i->addr); 3644 break; 3645 default: 3646 break; 3647 } 3648 return 0; 3649 } 3650 3651 static void packet_dev_mclist_delete(struct net_device *dev, 3652 struct packet_mclist **mlp) 3653 { 3654 struct packet_mclist *ml; 3655 3656 while ((ml = *mlp) != NULL) { 3657 if (ml->ifindex == dev->ifindex) { 3658 packet_dev_mc(dev, ml, -1); 3659 *mlp = ml->next; 3660 kfree(ml); 3661 } else 3662 mlp = &ml->next; 3663 } 3664 } 3665 3666 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) 3667 { 3668 struct packet_sock *po = pkt_sk(sk); 3669 struct packet_mclist *ml, *i; 3670 struct net_device *dev; 3671 int err; 3672 3673 rtnl_lock(); 3674 3675 err = -ENODEV; 3676 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); 3677 if (!dev) 3678 goto done; 3679 3680 err = -EINVAL; 3681 if (mreq->mr_alen > dev->addr_len) 3682 goto done; 3683 3684 err = -ENOBUFS; 3685 i = kmalloc(sizeof(*i), GFP_KERNEL); 3686 if (i == NULL) 3687 goto done; 3688 3689 err = 0; 3690 for (ml = po->mclist; ml; ml = ml->next) { 3691 if (ml->ifindex == mreq->mr_ifindex && 3692 ml->type 
== mreq->mr_type && 3693 ml->alen == mreq->mr_alen && 3694 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3695 ml->count++; 3696 /* Free the new element ... */ 3697 kfree(i); 3698 goto done; 3699 } 3700 } 3701 3702 i->type = mreq->mr_type; 3703 i->ifindex = mreq->mr_ifindex; 3704 i->alen = mreq->mr_alen; 3705 memcpy(i->addr, mreq->mr_address, i->alen); 3706 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3707 i->count = 1; 3708 i->next = po->mclist; 3709 po->mclist = i; 3710 err = packet_dev_mc(dev, i, 1); 3711 if (err) { 3712 po->mclist = i->next; 3713 kfree(i); 3714 } 3715 3716 done: 3717 rtnl_unlock(); 3718 return err; 3719 } 3720 3721 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 3722 { 3723 struct packet_mclist *ml, **mlp; 3724 3725 rtnl_lock(); 3726 3727 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 3728 if (ml->ifindex == mreq->mr_ifindex && 3729 ml->type == mreq->mr_type && 3730 ml->alen == mreq->mr_alen && 3731 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3732 if (--ml->count == 0) { 3733 struct net_device *dev; 3734 *mlp = ml->next; 3735 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3736 if (dev) 3737 packet_dev_mc(dev, ml, -1); 3738 kfree(ml); 3739 } 3740 break; 3741 } 3742 } 3743 rtnl_unlock(); 3744 return 0; 3745 } 3746 3747 static void packet_flush_mclist(struct sock *sk) 3748 { 3749 struct packet_sock *po = pkt_sk(sk); 3750 struct packet_mclist *ml; 3751 3752 if (!po->mclist) 3753 return; 3754 3755 rtnl_lock(); 3756 while ((ml = po->mclist) != NULL) { 3757 struct net_device *dev; 3758 3759 po->mclist = ml->next; 3760 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3761 if (dev != NULL) 3762 packet_dev_mc(dev, ml, -1); 3763 kfree(ml); 3764 } 3765 rtnl_unlock(); 3766 } 3767 3768 static int 3769 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, 3770 unsigned int optlen) 3771 { 3772 struct sock *sk = sock->sk; 3773 struct packet_sock *po = pkt_sk(sk); 3774 int ret; 3775 3776 if (level != SOL_PACKET) 3777 return -ENOPROTOOPT; 3778 3779 switch (optname) { 3780 case PACKET_ADD_MEMBERSHIP: 3781 case PACKET_DROP_MEMBERSHIP: 3782 { 3783 struct packet_mreq_max mreq; 3784 int len = optlen; 3785 memset(&mreq, 0, sizeof(mreq)); 3786 if (len < sizeof(struct packet_mreq)) 3787 return -EINVAL; 3788 if (len > sizeof(mreq)) 3789 len = sizeof(mreq); 3790 if (copy_from_sockptr(&mreq, optval, len)) 3791 return -EFAULT; 3792 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 3793 return -EINVAL; 3794 if (optname == PACKET_ADD_MEMBERSHIP) 3795 ret = packet_mc_add(sk, &mreq); 3796 else 3797 ret = packet_mc_drop(sk, &mreq); 3798 return ret; 3799 } 3800 3801 case PACKET_RX_RING: 3802 case PACKET_TX_RING: 3803 { 3804 union tpacket_req_u req_u; 3805 int len; 3806 3807 lock_sock(sk); 3808 switch (po->tp_version) { 3809 case TPACKET_V1: 3810 case TPACKET_V2: 3811 len = sizeof(req_u.req); 3812 break; 3813 case TPACKET_V3: 3814 default: 3815 len = sizeof(req_u.req3); 3816 break; 3817 } 3818 if (optlen < len) { 3819 ret = -EINVAL; 3820 } else { 3821 if (copy_from_sockptr(&req_u.req, optval, len)) 3822 ret = -EFAULT; 3823 else 3824 ret = packet_set_ring(sk, &req_u, 0, 3825 optname == PACKET_TX_RING); 3826 } 3827 release_sock(sk); 3828 return ret; 3829 } 3830 case PACKET_COPY_THRESH: 3831 { 3832 int val; 3833 3834 if (optlen != sizeof(val)) 3835 return -EINVAL; 3836 if (copy_from_sockptr(&val, optval, sizeof(val))) 3837 return -EFAULT; 3838 3839 
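		/* Paired with the lockless READ_ONCE() readers in
		 * tpacket_rcv() and packet_getsockopt().
		 */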
WRITE_ONCE(pkt_sk(sk)->copy_thresh, val); 3840 return 0; 3841 } 3842 case PACKET_VERSION: 3843 { 3844 int val; 3845 3846 if (optlen != sizeof(val)) 3847 return -EINVAL; 3848 if (copy_from_sockptr(&val, optval, sizeof(val))) 3849 return -EFAULT; 3850 switch (val) { 3851 case TPACKET_V1: 3852 case TPACKET_V2: 3853 case TPACKET_V3: 3854 break; 3855 default: 3856 return -EINVAL; 3857 } 3858 lock_sock(sk); 3859 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3860 ret = -EBUSY; 3861 } else { 3862 po->tp_version = val; 3863 ret = 0; 3864 } 3865 release_sock(sk); 3866 return ret; 3867 } 3868 case PACKET_RESERVE: 3869 { 3870 unsigned int val; 3871 3872 if (optlen != sizeof(val)) 3873 return -EINVAL; 3874 if (copy_from_sockptr(&val, optval, sizeof(val))) 3875 return -EFAULT; 3876 if (val > INT_MAX) 3877 return -EINVAL; 3878 lock_sock(sk); 3879 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3880 ret = -EBUSY; 3881 } else { 3882 po->tp_reserve = val; 3883 ret = 0; 3884 } 3885 release_sock(sk); 3886 return ret; 3887 } 3888 case PACKET_LOSS: 3889 { 3890 unsigned int val; 3891 3892 if (optlen != sizeof(val)) 3893 return -EINVAL; 3894 if (copy_from_sockptr(&val, optval, sizeof(val))) 3895 return -EFAULT; 3896 3897 lock_sock(sk); 3898 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3899 ret = -EBUSY; 3900 } else { 3901 packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val); 3902 ret = 0; 3903 } 3904 release_sock(sk); 3905 return ret; 3906 } 3907 case PACKET_AUXDATA: 3908 { 3909 int val; 3910 3911 if (optlen < sizeof(val)) 3912 return -EINVAL; 3913 if (copy_from_sockptr(&val, optval, sizeof(val))) 3914 return -EFAULT; 3915 3916 packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val); 3917 return 0; 3918 } 3919 case PACKET_ORIGDEV: 3920 { 3921 int val; 3922 3923 if (optlen < sizeof(val)) 3924 return -EINVAL; 3925 if (copy_from_sockptr(&val, optval, sizeof(val))) 3926 return -EFAULT; 3927 3928 packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val); 3929 return 0; 3930 } 3931 case PACKET_VNET_HDR: 3932 case PACKET_VNET_HDR_SZ: 3933 { 3934 int val, hdr_len; 3935 3936 if (sock->type != SOCK_RAW) 3937 return -EINVAL; 3938 if (optlen < sizeof(val)) 3939 return -EINVAL; 3940 if (copy_from_sockptr(&val, optval, sizeof(val))) 3941 return -EFAULT; 3942 3943 if (optname == PACKET_VNET_HDR_SZ) { 3944 if (val && val != sizeof(struct virtio_net_hdr) && 3945 val != sizeof(struct virtio_net_hdr_mrg_rxbuf)) 3946 return -EINVAL; 3947 hdr_len = val; 3948 } else { 3949 hdr_len = val ? 
sizeof(struct virtio_net_hdr) : 0; 3950 } 3951 lock_sock(sk); 3952 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3953 ret = -EBUSY; 3954 } else { 3955 WRITE_ONCE(po->vnet_hdr_sz, hdr_len); 3956 ret = 0; 3957 } 3958 release_sock(sk); 3959 return ret; 3960 } 3961 case PACKET_TIMESTAMP: 3962 { 3963 int val; 3964 3965 if (optlen != sizeof(val)) 3966 return -EINVAL; 3967 if (copy_from_sockptr(&val, optval, sizeof(val))) 3968 return -EFAULT; 3969 3970 WRITE_ONCE(po->tp_tstamp, val); 3971 return 0; 3972 } 3973 case PACKET_FANOUT: 3974 { 3975 struct fanout_args args = { 0 }; 3976 3977 if (optlen != sizeof(int) && optlen != sizeof(args)) 3978 return -EINVAL; 3979 if (copy_from_sockptr(&args, optval, optlen)) 3980 return -EFAULT; 3981 3982 return fanout_add(sk, &args); 3983 } 3984 case PACKET_FANOUT_DATA: 3985 { 3986 /* Paired with the WRITE_ONCE() in fanout_add() */ 3987 if (!READ_ONCE(po->fanout)) 3988 return -EINVAL; 3989 3990 return fanout_set_data(po, optval, optlen); 3991 } 3992 case PACKET_IGNORE_OUTGOING: 3993 { 3994 int val; 3995 3996 if (optlen != sizeof(val)) 3997 return -EINVAL; 3998 if (copy_from_sockptr(&val, optval, sizeof(val))) 3999 return -EFAULT; 4000 if (val < 0 || val > 1) 4001 return -EINVAL; 4002 4003 po->prot_hook.ignore_outgoing = !!val; 4004 return 0; 4005 } 4006 case PACKET_TX_HAS_OFF: 4007 { 4008 unsigned int val; 4009 4010 if (optlen != sizeof(val)) 4011 return -EINVAL; 4012 if (copy_from_sockptr(&val, optval, sizeof(val))) 4013 return -EFAULT; 4014 4015 lock_sock(sk); 4016 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec) 4017 packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val); 4018 4019 release_sock(sk); 4020 return 0; 4021 } 4022 case PACKET_QDISC_BYPASS: 4023 { 4024 int val; 4025 4026 if (optlen != sizeof(val)) 4027 return -EINVAL; 4028 if (copy_from_sockptr(&val, optval, sizeof(val))) 4029 return -EFAULT; 4030 4031 packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val); 4032 return 0; 4033 } 4034 default: 4035 return -ENOPROTOOPT; 4036 } 4037 } 4038 4039 static int packet_getsockopt(struct socket *sock, int level, int optname, 4040 char __user *optval, int __user *optlen) 4041 { 4042 int len; 4043 int val, lv = sizeof(val); 4044 struct sock *sk = sock->sk; 4045 struct packet_sock *po = pkt_sk(sk); 4046 void *data = &val; 4047 union tpacket_stats_u st; 4048 struct tpacket_rollover_stats rstats; 4049 int drops; 4050 4051 if (level != SOL_PACKET) 4052 return -ENOPROTOOPT; 4053 4054 if (get_user(len, optlen)) 4055 return -EFAULT; 4056 4057 if (len < 0) 4058 return -EINVAL; 4059 4060 switch (optname) { 4061 case PACKET_STATISTICS: 4062 spin_lock_bh(&sk->sk_receive_queue.lock); 4063 memcpy(&st, &po->stats, sizeof(st)); 4064 memset(&po->stats, 0, sizeof(po->stats)); 4065 spin_unlock_bh(&sk->sk_receive_queue.lock); 4066 drops = atomic_xchg(&po->tp_drops, 0); 4067 4068 if (po->tp_version == TPACKET_V3) { 4069 lv = sizeof(struct tpacket_stats_v3); 4070 st.stats3.tp_drops = drops; 4071 st.stats3.tp_packets += drops; 4072 data = &st.stats3; 4073 } else { 4074 lv = sizeof(struct tpacket_stats); 4075 st.stats1.tp_drops = drops; 4076 st.stats1.tp_packets += drops; 4077 data = &st.stats1; 4078 } 4079 4080 break; 4081 case PACKET_AUXDATA: 4082 val = packet_sock_flag(po, PACKET_SOCK_AUXDATA); 4083 break; 4084 case PACKET_ORIGDEV: 4085 val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV); 4086 break; 4087 case PACKET_VNET_HDR: 4088 val = !!READ_ONCE(po->vnet_hdr_sz); 4089 break; 4090 case PACKET_VNET_HDR_SZ: 4091 val = READ_ONCE(po->vnet_hdr_sz); 4092 break; 4093 case 
PACKET_COPY_THRESH: 4094 val = READ_ONCE(pkt_sk(sk)->copy_thresh); 4095 break; 4096 case PACKET_VERSION: 4097 val = po->tp_version; 4098 break; 4099 case PACKET_HDRLEN: 4100 if (len > sizeof(int)) 4101 len = sizeof(int); 4102 if (len < sizeof(int)) 4103 return -EINVAL; 4104 if (copy_from_user(&val, optval, len)) 4105 return -EFAULT; 4106 switch (val) { 4107 case TPACKET_V1: 4108 val = sizeof(struct tpacket_hdr); 4109 break; 4110 case TPACKET_V2: 4111 val = sizeof(struct tpacket2_hdr); 4112 break; 4113 case TPACKET_V3: 4114 val = sizeof(struct tpacket3_hdr); 4115 break; 4116 default: 4117 return -EINVAL; 4118 } 4119 break; 4120 case PACKET_RESERVE: 4121 val = po->tp_reserve; 4122 break; 4123 case PACKET_LOSS: 4124 val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS); 4125 break; 4126 case PACKET_TIMESTAMP: 4127 val = READ_ONCE(po->tp_tstamp); 4128 break; 4129 case PACKET_FANOUT: 4130 val = (po->fanout ? 4131 ((u32)po->fanout->id | 4132 ((u32)po->fanout->type << 16) | 4133 ((u32)po->fanout->flags << 24)) : 4134 0); 4135 break; 4136 case PACKET_IGNORE_OUTGOING: 4137 val = po->prot_hook.ignore_outgoing; 4138 break; 4139 case PACKET_ROLLOVER_STATS: 4140 if (!po->rollover) 4141 return -EINVAL; 4142 rstats.tp_all = atomic_long_read(&po->rollover->num); 4143 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); 4144 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); 4145 data = &rstats; 4146 lv = sizeof(rstats); 4147 break; 4148 case PACKET_TX_HAS_OFF: 4149 val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF); 4150 break; 4151 case PACKET_QDISC_BYPASS: 4152 val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS); 4153 break; 4154 default: 4155 return -ENOPROTOOPT; 4156 } 4157 4158 if (len > lv) 4159 len = lv; 4160 if (put_user(len, optlen)) 4161 return -EFAULT; 4162 if (copy_to_user(optval, data, len)) 4163 return -EFAULT; 4164 return 0; 4165 } 4166 4167 static int packet_notifier(struct notifier_block *this, 4168 unsigned long msg, void *ptr) 4169 { 4170 struct sock *sk; 4171 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4172 struct net *net = dev_net(dev); 4173 4174 rcu_read_lock(); 4175 sk_for_each_rcu(sk, &net->packet.sklist) { 4176 struct packet_sock *po = pkt_sk(sk); 4177 4178 switch (msg) { 4179 case NETDEV_UNREGISTER: 4180 if (po->mclist) 4181 packet_dev_mclist_delete(dev, &po->mclist); 4182 fallthrough; 4183 4184 case NETDEV_DOWN: 4185 if (dev->ifindex == po->ifindex) { 4186 spin_lock(&po->bind_lock); 4187 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) { 4188 __unregister_prot_hook(sk, false); 4189 sk->sk_err = ENETDOWN; 4190 if (!sock_flag(sk, SOCK_DEAD)) 4191 sk_error_report(sk); 4192 } 4193 if (msg == NETDEV_UNREGISTER) { 4194 packet_cached_dev_reset(po); 4195 WRITE_ONCE(po->ifindex, -1); 4196 netdev_put(po->prot_hook.dev, 4197 &po->prot_hook.dev_tracker); 4198 po->prot_hook.dev = NULL; 4199 } 4200 spin_unlock(&po->bind_lock); 4201 } 4202 break; 4203 case NETDEV_UP: 4204 if (dev->ifindex == po->ifindex) { 4205 spin_lock(&po->bind_lock); 4206 if (po->num) 4207 register_prot_hook(sk); 4208 spin_unlock(&po->bind_lock); 4209 } 4210 break; 4211 } 4212 } 4213 rcu_read_unlock(); 4214 return NOTIFY_DONE; 4215 } 4216 4217 4218 static int packet_ioctl(struct socket *sock, unsigned int cmd, 4219 unsigned long arg) 4220 { 4221 struct sock *sk = sock->sk; 4222 4223 switch (cmd) { 4224 case SIOCOUTQ: 4225 { 4226 int amount = sk_wmem_alloc_get(sk); 4227 4228 return put_user(amount, (int __user *)arg); 4229 } 4230 case SIOCINQ: 4231 { 4232 struct sk_buff *skb; 4233 int amount = 
static int packet_notifier(struct notifier_block *this,
			   unsigned long msg, void *ptr)
{
	struct sock *sk;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist_delete(dev, &po->mclist);
			fallthrough;

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					packet_cached_dev_reset(po);
					WRITE_ONCE(po->ifindex, -1);
					netdev_put(po->prot_hook.dev,
						   &po->prot_hook.dev_tracker);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}


static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

static __poll_t packet_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	__poll_t mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
					      TP_STATUS_KERNEL))
			mask |= EPOLLIN | EPOLLRDNORM;
	}
	packet_rcv_try_clear_pressure(po);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
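
/*
 * Usage sketch (userspace): waiting for ring traffic with poll(), which
 * ends up in packet_poll() above.  "ring", "frame_idx", "frame_size" and
 * "handle_frame" are hypothetical names for a previously configured and
 * mmap()ed TPACKET_V2 RX ring and its consumer.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr;
 *
 *	poll(&pfd, 1, -1);
 *	hdr = (struct tpacket2_hdr *)((char *)ring + frame_idx * frame_size);
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		handle_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */
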
/* Dirty? Well, I still have not learned a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_long_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_long_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
	if (buffer)
		return buffer;

	/* vmalloc failed, let's dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
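
/*
 * Usage sketch (userspace): requesting the RX ring that packet_set_ring()
 * below sets up.  The geometry is illustrative; tp_block_size must be
 * page-aligned, tp_frame_size must be TPACKET_ALIGNMENT-aligned and large
 * enough for the chosen header, and the frame/block counts must satisfy
 * the consistency checks below.
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 22,
 *		.tp_block_nr	   = 64,
 *		.tp_frame_size	   = 1 << 11,
 *		.tp_frame_nr	   = ((1 << 22) / (1 << 11)) * 64,
 *		.tp_retire_blk_tov = 60,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 * tp_retire_blk_tov is the block retire timeout in milliseconds; leaving
 * it zero lets the kernel pick a default.
 */
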
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long *rx_owner_map = NULL;
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;
	/* Added to avoid minimal code churn */
	struct tpacket_req *req = &req_u->req;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_long_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		unsigned int min_frame_size;

		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		min_frame_size = po->tp_hdrlen + po->tp_reserve;
		if (po->tp_version >= TPACKET_V3 &&
		    req->tp_block_size <
		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
			goto out;
		if (unlikely(req->tp_frame_size < min_frame_size))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Block transmit is not supported yet */
			if (!tx_ring) {
				init_prb_bdqc(po, rb, pg_vec, req_u);
			} else {
				struct tpacket_req3 *req3 = &req_u->req3;

				if (req3->tp_retire_blk_tov ||
				    req3->tp_sizeof_priv ||
				    req3->tp_feature_req_word) {
					err = -EINVAL;
					goto out_free_pg_vec;
				}
			}
			break;
		default:
			if (!tx_ring) {
				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
				if (!rx_owner_map)
					goto out_free_pg_vec;
			}
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
	num = po->num;
	if (was_running) {
		WRITE_ONCE(po->num, 0);
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_long_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		if (po->tp_version <= TPACKET_V2)
			swap(rb->rx_owner_map, rx_owner_map);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_long_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %ld\n",
			       atomic_long_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		WRITE_ONCE(po->num, num);
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

out_free_pg_vec:
	if (pg_vec) {
		bitmap_free(rx_owner_map);
		free_pg_vec(pg_vec, order, req->tp_block_nr);
	}
out:
	return err;
}

static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_long_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
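
/*
 * Usage sketch (userspace): mapping the ring(s) configured above.  As
 * packet_mmap() enforces, the offset must be zero and the length must
 * equal the combined size of all configured rings; when both rings exist,
 * the RX ring occupies the first part of the mapping and the TX ring
 * follows.
 *
 *	size_t len = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 *	if (ring == MAP_FAILED)
 *		perror("mmap");
 */
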
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(READ_ONCE(po->num)),
			   READ_ONCE(po->ifindex),
			   packet_sock_flag(po, PACKET_SOCK_RUNNING),
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};
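
/*
 * Registration order matters here: packet_init() below brings up the
 * pernet state (including /proc/net/packet) and the netdevice notifier
 * before the proto and the socket family, so everything a new PF_PACKET
 * socket relies on exists by the time socket(2) can reach packet_create().
 * packet_exit() unwinds in the reverse order, as do the error labels in
 * packet_init().
 */
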
static void __exit packet_exit(void)
{
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
}

static int __init packet_init(void)
{
	int rc;

	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;
	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out_notifier;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;

	return 0;

out_proto:
	proto_unregister(&packet_proto);
out_notifier:
	unregister_netdevice_notifier(&packet_netdev_notifier);
out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_DESCRIPTION("Packet socket support (AF_PACKET)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);