1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * PACKET - implements raw packet sockets. 8 * 9 * Authors: Ross Biro 10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Alan Cox, <gw4pts@gw4pts.ampr.org> 12 * 13 * Fixes: 14 * Alan Cox : verify_area() now used correctly 15 * Alan Cox : new skbuff lists, look ma no backlogs! 16 * Alan Cox : tidied skbuff lists. 17 * Alan Cox : Now uses generic datagram routines I 18 * added. Also fixed the peek/read crash 19 * from all old Linux datagram code. 20 * Alan Cox : Uses the improved datagram code. 21 * Alan Cox : Added NULL's for socket options. 22 * Alan Cox : Re-commented the code. 23 * Alan Cox : Use new kernel side addressing 24 * Rob Janssen : Correct MTU usage. 25 * Dave Platt : Counter leaks caused by incorrect 26 * interrupt locking and some slightly 27 * dubious gcc output. Can you read 28 * compiler: it said _VOLATILE_ 29 * Richard Kooijman : Timestamp fixes. 30 * Alan Cox : New buffers. Use sk->mac.raw. 31 * Alan Cox : sendmsg/recvmsg support. 32 * Alan Cox : Protocol setting support 33 * Alexey Kuznetsov : Untied from IPv4 stack. 34 * Cyrus Durgin : Fixed kerneld for kmod. 35 * Michal Ostrowski : Module initialization cleanup. 36 * Ulises Alonso : Frame number limit removal and 37 * packet_set_ring memory leak. 38 * Eric Biederman : Allow for > 8 byte hardware addresses. 39 * The convention is that longer addresses 40 * will simply extend the hardware address 41 * byte arrays at the end of sockaddr_ll 42 * and packet_mreq. 43 * Johann Baudy : Added TX RING. 44 * Chetan Loke : Implemented TPACKET_V3 block abstraction 45 * layer. 46 * Copyright (C) 2011, <lokec@ccs.neu.edu> 47 */ 48 49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 50 51 #include <linux/ethtool.h> 52 #include <linux/uio.h> 53 #include <linux/filter.h> 54 #include <linux/types.h> 55 #include <linux/mm.h> 56 #include <linux/capability.h> 57 #include <linux/fcntl.h> 58 #include <linux/socket.h> 59 #include <linux/in.h> 60 #include <linux/inet.h> 61 #include <linux/netdevice.h> 62 #include <linux/if_packet.h> 63 #include <linux/wireless.h> 64 #include <linux/kernel.h> 65 #include <linux/kmod.h> 66 #include <linux/slab.h> 67 #include <linux/vmalloc.h> 68 #include <net/net_namespace.h> 69 #include <net/ip.h> 70 #include <net/protocol.h> 71 #include <linux/skbuff.h> 72 #include <net/sock.h> 73 #include <linux/errno.h> 74 #include <linux/timer.h> 75 #include <linux/uaccess.h> 76 #include <asm/ioctls.h> 77 #include <asm/page.h> 78 #include <asm/cacheflush.h> 79 #include <asm/io.h> 80 #include <linux/proc_fs.h> 81 #include <linux/seq_file.h> 82 #include <linux/poll.h> 83 #include <linux/module.h> 84 #include <linux/init.h> 85 #include <linux/mutex.h> 86 #include <linux/if_vlan.h> 87 #include <linux/virtio_net.h> 88 #include <linux/errqueue.h> 89 #include <linux/net_tstamp.h> 90 #include <linux/percpu.h> 91 #ifdef CONFIG_INET 92 #include <net/inet_common.h> 93 #endif 94 #include <linux/bpf.h> 95 #include <net/compat.h> 96 #include <linux/netfilter_netdev.h> 97 98 #include "internal.h" 99 100 /* 101 Assumptions: 102 - If the device has no dev->header_ops->create, there is no LL header 103 visible above the device. In this case, its hard_header_len should be 0. 104 The device may prepend its own header internally. 
In this case, its 105 needed_headroom should be set to the space needed for it to add its 106 internal header. 107 For example, a WiFi driver pretending to be an Ethernet driver should 108 set its hard_header_len to be the Ethernet header length, and set its 109 needed_headroom to be (the real WiFi header length - the fake Ethernet 110 header length). 111 - packet socket receives packets with pulled ll header, 112 so that SOCK_RAW should push it back. 113 114 On receive: 115 ----------- 116 117 Incoming, dev_has_header(dev) == true 118 mac_header -> ll header 119 data -> data 120 121 Outgoing, dev_has_header(dev) == true 122 mac_header -> ll header 123 data -> ll header 124 125 Incoming, dev_has_header(dev) == false 126 mac_header -> data 127 However drivers often make it point to the ll header. 128 This is incorrect because the ll header should be invisible to us. 129 data -> data 130 131 Outgoing, dev_has_header(dev) == false 132 mac_header -> data. ll header is invisible to us. 133 data -> data 134 135 Resume 136 If dev_has_header(dev) == false we are unable to restore the ll header, 137 because it is invisible to us. 138 139 140 On transmit: 141 ------------ 142 143 dev_has_header(dev) == true 144 mac_header -> ll header 145 data -> ll header 146 147 dev_has_header(dev) == false (ll header is invisible to us) 148 mac_header -> data 149 data -> data 150 151 We should set network_header on output to the correct position, 152 packet classifier depends on it. 153 */ 154 155 /* Private packet socket structures. */ 156 157 /* identical to struct packet_mreq except it has 158 * a longer address field. 159 */ 160 struct packet_mreq_max { 161 int mr_ifindex; 162 unsigned short mr_type; 163 unsigned short mr_alen; 164 unsigned char mr_address[MAX_ADDR_LEN]; 165 }; 166 167 union tpacket_uhdr { 168 struct tpacket_hdr *h1; 169 struct tpacket2_hdr *h2; 170 struct tpacket3_hdr *h3; 171 void *raw; 172 }; 173 174 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 175 int closing, int tx_ring); 176 177 #define V3_ALIGNMENT (8) 178 179 #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT)) 180 181 #define BLK_PLUS_PRIV(sz_of_priv) \ 182 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) 183 184 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) 185 #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) 186 #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) 187 #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len) 188 #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num) 189 #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) 190 191 struct packet_sock; 192 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 193 struct packet_type *pt, struct net_device *orig_dev); 194 195 static void *packet_previous_frame(struct packet_sock *po, 196 struct packet_ring_buffer *rb, 197 int status); 198 static void packet_increment_head(struct packet_ring_buffer *buff); 199 static int prb_curr_blk_in_use(struct tpacket_block_desc *); 200 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *, 201 struct packet_sock *); 202 static void prb_retire_current_block(struct tpacket_kbdq_core *, 203 struct packet_sock *, unsigned int status); 204 static int prb_queue_frozen(struct tpacket_kbdq_core *); 205 static void prb_open_block(struct tpacket_kbdq_core *, 206 struct tpacket_block_desc *); 207 static enum hrtimer_restart prb_retire_rx_blk_timer_expired(struct hrtimer *); 208 static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); 209 static void 
prb_clear_rxhash(struct tpacket_kbdq_core *, 210 struct tpacket3_hdr *); 211 static void prb_fill_vlan_info(struct tpacket_kbdq_core *, 212 struct tpacket3_hdr *); 213 static void packet_flush_mclist(struct sock *sk); 214 static u16 packet_pick_tx_queue(struct sk_buff *skb); 215 216 struct packet_skb_cb { 217 union { 218 struct sockaddr_pkt pkt; 219 union { 220 /* Trick: alias skb original length with 221 * ll.sll_family and ll.protocol in order 222 * to save room. 223 */ 224 unsigned int origlen; 225 struct sockaddr_ll ll; 226 }; 227 } sa; 228 }; 229 230 #define vio_le() virtio_legacy_is_little_endian() 231 232 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 233 234 #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) 235 #define GET_PBLOCK_DESC(x, bid) \ 236 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) 237 #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ 238 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) 239 #define GET_NEXT_PRB_BLK_NUM(x) \ 240 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ 241 ((x)->kactive_blk_num+1) : 0) 242 243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 244 static void __fanout_link(struct sock *sk, struct packet_sock *po); 245 246 #ifdef CONFIG_NETFILTER_EGRESS 247 static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb) 248 { 249 struct sk_buff *next, *head = NULL, *tail; 250 int rc; 251 252 rcu_read_lock(); 253 for (; skb != NULL; skb = next) { 254 next = skb->next; 255 skb_mark_not_on_list(skb); 256 257 if (!nf_hook_egress(skb, &rc, skb->dev)) 258 continue; 259 260 if (!head) 261 head = skb; 262 else 263 tail->next = skb; 264 265 tail = skb; 266 } 267 rcu_read_unlock(); 268 269 return head; 270 } 271 #endif 272 273 static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb) 274 { 275 if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS)) 276 return dev_queue_xmit(skb); 277 278 #ifdef CONFIG_NETFILTER_EGRESS 279 if (nf_hook_egress_active()) { 280 skb = nf_hook_direct_egress(skb); 281 if (!skb) 282 return NET_XMIT_DROP; 283 } 284 #endif 285 return dev_direct_xmit(skb, packet_pick_tx_queue(skb)); 286 } 287 288 static struct net_device *packet_cached_dev_get(struct packet_sock *po) 289 { 290 struct net_device *dev; 291 292 rcu_read_lock(); 293 dev = rcu_dereference(po->cached_dev); 294 dev_hold(dev); 295 rcu_read_unlock(); 296 297 return dev; 298 } 299 300 static void packet_cached_dev_assign(struct packet_sock *po, 301 struct net_device *dev) 302 { 303 rcu_assign_pointer(po->cached_dev, dev); 304 } 305 306 static void packet_cached_dev_reset(struct packet_sock *po) 307 { 308 RCU_INIT_POINTER(po->cached_dev, NULL); 309 } 310 311 static u16 packet_pick_tx_queue(struct sk_buff *skb) 312 { 313 struct net_device *dev = skb->dev; 314 const struct net_device_ops *ops = dev->netdev_ops; 315 int cpu = raw_smp_processor_id(); 316 u16 queue_index; 317 318 #ifdef CONFIG_XPS 319 skb->sender_cpu = cpu + 1; 320 #endif 321 skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues); 322 if (ops->ndo_select_queue) { 323 queue_index = ops->ndo_select_queue(dev, skb, NULL); 324 queue_index = netdev_cap_txqueue(dev, queue_index); 325 } else { 326 queue_index = netdev_pick_tx(dev, skb, NULL); 327 } 328 329 return queue_index; 330 } 331 332 /* __register_prot_hook must be invoked through register_prot_hook 333 * or from a context in which asynchronous accesses to the packet 334 * socket is not possible (packet_create()). 
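 * In practice that means the caller either holds po->bind_lock
 * (register_prot_hook() asserts exactly this with lockdep) or still
 * owns the only reference to the freshly created socket.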
335 */ 336 static void __register_prot_hook(struct sock *sk) 337 { 338 struct packet_sock *po = pkt_sk(sk); 339 340 if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) { 341 if (po->fanout) 342 __fanout_link(sk, po); 343 else 344 dev_add_pack(&po->prot_hook); 345 346 sock_hold(sk); 347 packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1); 348 } 349 } 350 351 static void register_prot_hook(struct sock *sk) 352 { 353 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock); 354 __register_prot_hook(sk); 355 } 356 357 /* If the sync parameter is true, we will temporarily drop 358 * the po->bind_lock and do a synchronize_net to make sure no 359 * asynchronous packet processing paths still refer to the elements 360 * of po->prot_hook. If the sync parameter is false, it is the 361 * callers responsibility to take care of this. 362 */ 363 static void __unregister_prot_hook(struct sock *sk, bool sync) 364 { 365 struct packet_sock *po = pkt_sk(sk); 366 367 lockdep_assert_held_once(&po->bind_lock); 368 369 packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0); 370 371 if (po->fanout) 372 __fanout_unlink(sk, po); 373 else 374 __dev_remove_pack(&po->prot_hook); 375 376 __sock_put(sk); 377 378 if (sync) { 379 spin_unlock(&po->bind_lock); 380 synchronize_net(); 381 spin_lock(&po->bind_lock); 382 } 383 } 384 385 static void unregister_prot_hook(struct sock *sk, bool sync) 386 { 387 struct packet_sock *po = pkt_sk(sk); 388 389 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) 390 __unregister_prot_hook(sk, sync); 391 } 392 393 static inline struct page * __pure pgv_to_page(void *addr) 394 { 395 if (is_vmalloc_addr(addr)) 396 return vmalloc_to_page(addr); 397 return virt_to_page(addr); 398 } 399 400 static void __packet_set_status(struct packet_sock *po, void *frame, int status) 401 { 402 union tpacket_uhdr h; 403 404 /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */ 405 406 h.raw = frame; 407 switch (po->tp_version) { 408 case TPACKET_V1: 409 WRITE_ONCE(h.h1->tp_status, status); 410 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 411 break; 412 case TPACKET_V2: 413 WRITE_ONCE(h.h2->tp_status, status); 414 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 415 break; 416 case TPACKET_V3: 417 WRITE_ONCE(h.h3->tp_status, status); 418 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); 419 break; 420 default: 421 WARN(1, "TPACKET version not supported.\n"); 422 BUG(); 423 } 424 425 smp_wmb(); 426 } 427 428 static int __packet_get_status(const struct packet_sock *po, void *frame) 429 { 430 union tpacket_uhdr h; 431 432 smp_rmb(); 433 434 /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */ 435 436 h.raw = frame; 437 switch (po->tp_version) { 438 case TPACKET_V1: 439 flush_dcache_page(pgv_to_page(&h.h1->tp_status)); 440 return READ_ONCE(h.h1->tp_status); 441 case TPACKET_V2: 442 flush_dcache_page(pgv_to_page(&h.h2->tp_status)); 443 return READ_ONCE(h.h2->tp_status); 444 case TPACKET_V3: 445 flush_dcache_page(pgv_to_page(&h.h3->tp_status)); 446 return READ_ONCE(h.h3->tp_status); 447 default: 448 WARN(1, "TPACKET version not supported.\n"); 449 BUG(); 450 return 0; 451 } 452 } 453 454 static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts, 455 unsigned int flags) 456 { 457 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 458 459 if (shhwtstamps && 460 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && 461 ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts)) 462 return TP_STATUS_TS_RAW_HARDWARE; 463 464 if ((flags & SOF_TIMESTAMPING_SOFTWARE) && 465 
ktime_to_timespec64_cond(skb_tstamp(skb), ts)) 466 return TP_STATUS_TS_SOFTWARE; 467 468 return 0; 469 } 470 471 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, 472 struct sk_buff *skb) 473 { 474 union tpacket_uhdr h; 475 struct timespec64 ts; 476 __u32 ts_status; 477 478 if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp)))) 479 return 0; 480 481 h.raw = frame; 482 /* 483 * versions 1 through 3 overflow the timestamps in y2106, since they 484 * all store the seconds in a 32-bit unsigned integer. 485 * If we create a version 4, that should have a 64-bit timestamp, 486 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit 487 * nanoseconds. 488 */ 489 switch (po->tp_version) { 490 case TPACKET_V1: 491 h.h1->tp_sec = ts.tv_sec; 492 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 493 break; 494 case TPACKET_V2: 495 h.h2->tp_sec = ts.tv_sec; 496 h.h2->tp_nsec = ts.tv_nsec; 497 break; 498 case TPACKET_V3: 499 h.h3->tp_sec = ts.tv_sec; 500 h.h3->tp_nsec = ts.tv_nsec; 501 break; 502 default: 503 WARN(1, "TPACKET version not supported.\n"); 504 BUG(); 505 } 506 507 /* one flush is safe, as both fields always lie on the same cacheline */ 508 flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); 509 smp_wmb(); 510 511 return ts_status; 512 } 513 514 static void *packet_lookup_frame(const struct packet_sock *po, 515 const struct packet_ring_buffer *rb, 516 unsigned int position, 517 int status) 518 { 519 unsigned int pg_vec_pos, frame_offset; 520 union tpacket_uhdr h; 521 522 pg_vec_pos = position / rb->frames_per_block; 523 frame_offset = position % rb->frames_per_block; 524 525 h.raw = rb->pg_vec[pg_vec_pos].buffer + 526 (frame_offset * rb->frame_size); 527 528 if (status != __packet_get_status(po, h.raw)) 529 return NULL; 530 531 return h.raw; 532 } 533 534 static void *packet_current_frame(struct packet_sock *po, 535 struct packet_ring_buffer *rb, 536 int status) 537 { 538 return packet_lookup_frame(po, rb, rb->head, status); 539 } 540 541 static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev) 542 { 543 struct vlan_hdr vhdr, *vh; 544 unsigned int header_len; 545 546 if (!dev) 547 return 0; 548 549 /* In the SOCK_DGRAM scenario, skb data starts at the network 550 * protocol, which is after the VLAN headers. The outer VLAN 551 * header is at the hard_header_len offset in non-variable 552 * length link layer headers. If it's a VLAN device, the 553 * min_header_len should be used to exclude the VLAN header 554 * size. 
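 * For example, on a plain Ethernet netdev both values are ETH_HLEN
 * (14 bytes), so the outer tag is read at skb_mac_offset(skb) + 14.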
555 */ 556 if (dev->min_header_len == dev->hard_header_len) 557 header_len = dev->hard_header_len; 558 else if (is_vlan_dev(dev)) 559 header_len = dev->min_header_len; 560 else 561 return 0; 562 563 vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len, 564 sizeof(vhdr), &vhdr); 565 if (unlikely(!vh)) 566 return 0; 567 568 return ntohs(vh->h_vlan_TCI); 569 } 570 571 static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb) 572 { 573 __be16 proto = skb->protocol; 574 575 if (unlikely(eth_type_vlan(proto))) 576 proto = vlan_get_protocol_offset_inline(skb, proto, 577 skb_mac_offset(skb), 578 NULL); 579 580 return proto; 581 } 582 583 static void prb_shutdown_retire_blk_timer(struct packet_sock *po, 584 struct sk_buff_head *rb_queue) 585 { 586 struct tpacket_kbdq_core *pkc; 587 588 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 589 hrtimer_cancel(&pkc->retire_blk_timer); 590 } 591 592 static int prb_calc_retire_blk_tmo(struct packet_sock *po, 593 int blk_size_in_bytes) 594 { 595 struct net_device *dev; 596 unsigned int mbits, div; 597 struct ethtool_link_ksettings ecmd; 598 int err; 599 600 rtnl_lock(); 601 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); 602 if (unlikely(!dev)) { 603 rtnl_unlock(); 604 return DEFAULT_PRB_RETIRE_TOV; 605 } 606 err = __ethtool_get_link_ksettings(dev, &ecmd); 607 rtnl_unlock(); 608 if (err) 609 return DEFAULT_PRB_RETIRE_TOV; 610 611 /* If the link speed is so slow you don't really 612 * need to worry about perf anyways 613 */ 614 if (ecmd.base.speed < SPEED_1000 || 615 ecmd.base.speed == SPEED_UNKNOWN) 616 return DEFAULT_PRB_RETIRE_TOV; 617 618 div = ecmd.base.speed / 1000; 619 mbits = (blk_size_in_bytes * 8) / (1024 * 1024); 620 621 if (div) 622 mbits /= div; 623 624 if (div) 625 return mbits + 1; 626 return mbits; 627 } 628 629 static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, 630 union tpacket_req_u *req_u) 631 { 632 p1->feature_req_word = req_u->req3.tp_feature_req_word; 633 } 634 635 static void init_prb_bdqc(struct packet_sock *po, 636 struct packet_ring_buffer *rb, 637 struct pgv *pg_vec, 638 union tpacket_req_u *req_u) 639 { 640 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); 641 struct tpacket_block_desc *pbd; 642 643 memset(p1, 0x0, sizeof(*p1)); 644 645 p1->knxt_seq_num = 1; 646 p1->pkbdq = pg_vec; 647 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; 648 p1->pkblk_start = pg_vec[0].buffer; 649 p1->kblk_size = req_u->req3.tp_block_size; 650 p1->knum_blocks = req_u->req3.tp_block_nr; 651 p1->hdrlen = po->tp_hdrlen; 652 p1->version = po->tp_version; 653 po->stats.stats3.tp_freeze_q_cnt = 0; 654 if (req_u->req3.tp_retire_blk_tov) 655 p1->interval_ktime = ms_to_ktime(req_u->req3.tp_retire_blk_tov); 656 else 657 p1->interval_ktime = ms_to_ktime(prb_calc_retire_blk_tmo(po, 658 req_u->req3.tp_block_size)); 659 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; 660 rwlock_init(&p1->blk_fill_in_prog_lock); 661 662 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); 663 prb_init_ft_ops(p1, req_u); 664 hrtimer_setup(&p1->retire_blk_timer, prb_retire_rx_blk_timer_expired, 665 CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); 666 hrtimer_start(&p1->retire_blk_timer, p1->interval_ktime, 667 HRTIMER_MODE_REL_SOFT); 668 prb_open_block(p1, pbd); 669 } 670 671 /* 672 * With a 1MB block-size, on a 1Gbps line, it will take 673 * i) ~8 ms to fill a block + ii) memcpy etc. 674 * In this cut we are not accounting for the memcpy time. 
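 *
 * Worked example: tp_block_size = 1 MB is 8 Mbit of payload, so on a
 * 1 Gbit/s link prb_calc_retire_blk_tmo() returns 8 / 1 + 1 = 9 msecs.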
 *
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
static enum hrtimer_restart prb_retire_rx_blk_timer_expired(struct hrtimer *t)
{
	struct packet_sock *po =
		timer_container_of(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (!frozen) {
		if (BLOCK_NUM_PKTS(pbd)) {
			/* Not an empty block. Need to retire the block. */
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			prb_dispatch_next_block(pkc, po);
		}
	} else {
		/* Case 1. Queue was frozen because user-space was
		 * lagging behind.
		 */
		if (!prb_curr_blk_in_use(pbd)) {
			/* Case 2. Queue was frozen, user-space caught up,
			 * now the link went idle && the timer fired.
			 * We don't have a block to close. So we open this
			 * block and restart the timer.
			 * Opening a block thaws the queue and restarts the timer.
			 * Thawing/timer-refresh is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	hrtimer_forward_now(&pkc->retire_blk_timer, pkc->interval_ktime);
	spin_unlock(&po->sk.sk_receive_queue.lock);
	return HRTIMER_RESTART;
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: we DON'T refresh the timer on purpose.
 * Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * prb_open_block is called by tpacket_rcv or the timer callback.
 *
 * Reasons why we do NOT update the hrtimer in prb_open_block:
 * 1) It would increase the complexity of distinguishing the two caller
 *    scenarios.
 * 2) hrtimer_cancel and hrtimer_start need to be called if you want to update
 *    the TMO of an already enqueued hrtimer, leading to complex shutdown logic.
 *
 * One side effect of NOT updating the hrtimer when called by tpacket_rcv is
 * that a newly opened block triggered by tpacket_rcv may be retired earlier
 * than expected. On the other hand, if the timeout were updated in
 * prb_open_block, the frequent reception of network packets that leads to
 * prb_open_block being called could cause the hrtimer to be removed and
 * enqueued repeatedly.
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
885 * call-flow: 886 * __packet_lookup_frame_in_block 887 * prb_retire_current_block() 888 * prb_dispatch_next_block() 889 * |->(BLOCK_STATUS == USER) evaluates to true 890 * 5.1) Since block-0 is currently in-use, we just freeze the queue. 891 * 6) Now there are two cases: 892 * 6.1) Link goes idle right after the queue is frozen. 893 * But remember, the last open_block() refreshed the timer. 894 * When this timer expires,it will refresh itself so that we can 895 * re-open block-0 in near future. 896 * 6.2) Link is busy and keeps on receiving packets. This is a simple 897 * case and __packet_lookup_frame_in_block will check if block-0 898 * is free and can now be re-used. 899 */ 900 static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, 901 struct packet_sock *po) 902 { 903 pkc->reset_pending_on_curr_blk = 1; 904 po->stats.stats3.tp_freeze_q_cnt++; 905 } 906 907 #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) 908 909 /* 910 * If the next block is free then we will dispatch it 911 * and return a good offset. 912 * Else, we will freeze the queue. 913 * So, caller must check the return value. 914 */ 915 static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, 916 struct packet_sock *po) 917 { 918 struct tpacket_block_desc *pbd; 919 920 smp_rmb(); 921 922 /* 1. Get current block num */ 923 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 924 925 /* 2. If this block is currently in_use then freeze the queue */ 926 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { 927 prb_freeze_queue(pkc, po); 928 return NULL; 929 } 930 931 /* 932 * 3. 933 * open this block and return the offset where the first packet 934 * needs to get stored. 935 */ 936 prb_open_block(pkc, pbd); 937 return (void *)pkc->nxt_offset; 938 } 939 940 static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, 941 struct packet_sock *po, unsigned int status) 942 { 943 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 944 945 /* retire/close the current block */ 946 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { 947 /* 948 * Plug the case where copy_bits() is in progress on 949 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't 950 * have space to copy the pkt in the current block and 951 * called prb_retire_current_block() 952 * 953 * We don't need to worry about the TMO case because 954 * the timer-handler already handled this case. 955 */ 956 if (!(status & TP_STATUS_BLK_TMO)) { 957 /* Waiting for skb_copy_bits to finish... 
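 * The fill path holds blk_fill_in_prog_lock for read while it copies
 * the packet; taking it for write here (and dropping it right away)
 * just waits for any in-flight fill to complete.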
*/ 958 write_lock(&pkc->blk_fill_in_prog_lock); 959 write_unlock(&pkc->blk_fill_in_prog_lock); 960 } 961 prb_close_block(pkc, pbd, po, status); 962 return; 963 } 964 } 965 966 static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd) 967 { 968 return TP_STATUS_USER & BLOCK_STATUS(pbd); 969 } 970 971 static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) 972 { 973 return pkc->reset_pending_on_curr_blk; 974 } 975 976 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) 977 __releases(&pkc->blk_fill_in_prog_lock) 978 { 979 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 980 981 read_unlock(&pkc->blk_fill_in_prog_lock); 982 } 983 984 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, 985 struct tpacket3_hdr *ppd) 986 { 987 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb); 988 } 989 990 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, 991 struct tpacket3_hdr *ppd) 992 { 993 ppd->hv1.tp_rxhash = 0; 994 } 995 996 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, 997 struct tpacket3_hdr *ppd) 998 { 999 struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc); 1000 1001 if (skb_vlan_tag_present(pkc->skb)) { 1002 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); 1003 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); 1004 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 1005 } else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) { 1006 ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev); 1007 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol); 1008 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 1009 } else { 1010 ppd->hv1.tp_vlan_tci = 0; 1011 ppd->hv1.tp_vlan_tpid = 0; 1012 ppd->tp_status = TP_STATUS_AVAILABLE; 1013 } 1014 } 1015 1016 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc, 1017 struct tpacket3_hdr *ppd) 1018 { 1019 ppd->hv1.tp_padding = 0; 1020 prb_fill_vlan_info(pkc, ppd); 1021 1022 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH) 1023 prb_fill_rxhash(pkc, ppd); 1024 else 1025 prb_clear_rxhash(pkc, ppd); 1026 } 1027 1028 static void prb_fill_curr_block(char *curr, 1029 struct tpacket_kbdq_core *pkc, 1030 struct tpacket_block_desc *pbd, 1031 unsigned int len) 1032 __acquires(&pkc->blk_fill_in_prog_lock) 1033 { 1034 struct tpacket3_hdr *ppd; 1035 1036 ppd = (struct tpacket3_hdr *)curr; 1037 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len); 1038 pkc->prev = curr; 1039 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); 1040 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); 1041 BLOCK_NUM_PKTS(pbd) += 1; 1042 read_lock(&pkc->blk_fill_in_prog_lock); 1043 prb_run_all_ft_ops(pkc, ppd); 1044 } 1045 1046 /* Assumes caller has the sk->rx_queue.lock */ 1047 static void *__packet_lookup_frame_in_block(struct packet_sock *po, 1048 struct sk_buff *skb, 1049 unsigned int len 1050 ) 1051 { 1052 struct tpacket_kbdq_core *pkc; 1053 struct tpacket_block_desc *pbd; 1054 char *curr, *end; 1055 1056 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); 1057 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1058 1059 /* Queue is frozen when user space is lagging behind */ 1060 if (prb_queue_frozen(pkc)) { 1061 /* 1062 * Check if that last block which caused the queue to freeze, 1063 * is still in_use by user-space. 1064 */ 1065 if (prb_curr_blk_in_use(pbd)) { 1066 /* Can't record this packet */ 1067 return NULL; 1068 } else { 1069 /* 1070 * Ok, the block was released by user-space. 1071 * Now let's open that block. 
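 * (User space typically signals the release by writing
 *  TP_STATUS_KERNEL back into hdr.bh1.block_status, which is what
 *  prb_curr_blk_in_use() checks above.)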
1072 * opening a block also thaws the queue. 1073 * Thawing is a side effect. 1074 */ 1075 prb_open_block(pkc, pbd); 1076 } 1077 } 1078 1079 smp_mb(); 1080 curr = pkc->nxt_offset; 1081 pkc->skb = skb; 1082 end = (char *)pbd + pkc->kblk_size; 1083 1084 /* first try the current block */ 1085 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { 1086 prb_fill_curr_block(curr, pkc, pbd, len); 1087 return (void *)curr; 1088 } 1089 1090 /* Ok, close the current block */ 1091 prb_retire_current_block(pkc, po, 0); 1092 1093 /* Now, try to dispatch the next block */ 1094 curr = (char *)prb_dispatch_next_block(pkc, po); 1095 if (curr) { 1096 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); 1097 prb_fill_curr_block(curr, pkc, pbd, len); 1098 return (void *)curr; 1099 } 1100 1101 /* 1102 * No free blocks are available.user_space hasn't caught up yet. 1103 * Queue was just frozen and now this packet will get dropped. 1104 */ 1105 return NULL; 1106 } 1107 1108 static void *packet_current_rx_frame(struct packet_sock *po, 1109 struct sk_buff *skb, 1110 int status, unsigned int len) 1111 { 1112 char *curr = NULL; 1113 switch (po->tp_version) { 1114 case TPACKET_V1: 1115 case TPACKET_V2: 1116 curr = packet_lookup_frame(po, &po->rx_ring, 1117 po->rx_ring.head, status); 1118 return curr; 1119 case TPACKET_V3: 1120 return __packet_lookup_frame_in_block(po, skb, len); 1121 default: 1122 WARN(1, "TPACKET version not supported\n"); 1123 BUG(); 1124 return NULL; 1125 } 1126 } 1127 1128 static void *prb_lookup_block(const struct packet_sock *po, 1129 const struct packet_ring_buffer *rb, 1130 unsigned int idx, 1131 int status) 1132 { 1133 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 1134 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx); 1135 1136 if (status != BLOCK_STATUS(pbd)) 1137 return NULL; 1138 return pbd; 1139 } 1140 1141 static int prb_previous_blk_num(struct packet_ring_buffer *rb) 1142 { 1143 unsigned int prev; 1144 if (rb->prb_bdqc.kactive_blk_num) 1145 prev = rb->prb_bdqc.kactive_blk_num-1; 1146 else 1147 prev = rb->prb_bdqc.knum_blocks-1; 1148 return prev; 1149 } 1150 1151 /* Assumes caller has held the rx_queue.lock */ 1152 static void *__prb_previous_block(struct packet_sock *po, 1153 struct packet_ring_buffer *rb, 1154 int status) 1155 { 1156 unsigned int previous = prb_previous_blk_num(rb); 1157 return prb_lookup_block(po, rb, previous, status); 1158 } 1159 1160 static void *packet_previous_rx_frame(struct packet_sock *po, 1161 struct packet_ring_buffer *rb, 1162 int status) 1163 { 1164 if (po->tp_version <= TPACKET_V2) 1165 return packet_previous_frame(po, rb, status); 1166 1167 return __prb_previous_block(po, rb, status); 1168 } 1169 1170 static void packet_increment_rx_head(struct packet_sock *po, 1171 struct packet_ring_buffer *rb) 1172 { 1173 switch (po->tp_version) { 1174 case TPACKET_V1: 1175 case TPACKET_V2: 1176 return packet_increment_head(rb); 1177 case TPACKET_V3: 1178 default: 1179 WARN(1, "TPACKET version not supported.\n"); 1180 BUG(); 1181 return; 1182 } 1183 } 1184 1185 static void *packet_previous_frame(struct packet_sock *po, 1186 struct packet_ring_buffer *rb, 1187 int status) 1188 { 1189 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; 1190 return packet_lookup_frame(po, rb, previous, status); 1191 } 1192 1193 static void packet_increment_head(struct packet_ring_buffer *buff) 1194 { 1195 buff->head = buff->head != buff->frame_max ? 
buff->head+1 : 0; 1196 } 1197 1198 static void packet_inc_pending(struct packet_ring_buffer *rb) 1199 { 1200 this_cpu_inc(*rb->pending_refcnt); 1201 } 1202 1203 static void packet_dec_pending(struct packet_ring_buffer *rb) 1204 { 1205 this_cpu_dec(*rb->pending_refcnt); 1206 } 1207 1208 static unsigned int packet_read_pending(const struct packet_ring_buffer *rb) 1209 { 1210 unsigned int refcnt = 0; 1211 int cpu; 1212 1213 /* We don't use pending refcount in rx_ring. */ 1214 if (rb->pending_refcnt == NULL) 1215 return 0; 1216 1217 for_each_possible_cpu(cpu) 1218 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); 1219 1220 return refcnt; 1221 } 1222 1223 static int packet_alloc_pending(struct packet_sock *po) 1224 { 1225 po->rx_ring.pending_refcnt = NULL; 1226 1227 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); 1228 if (unlikely(po->tx_ring.pending_refcnt == NULL)) 1229 return -ENOBUFS; 1230 1231 return 0; 1232 } 1233 1234 static void packet_free_pending(struct packet_sock *po) 1235 { 1236 free_percpu(po->tx_ring.pending_refcnt); 1237 } 1238 1239 #define ROOM_POW_OFF 2 1240 #define ROOM_NONE 0x0 1241 #define ROOM_LOW 0x1 1242 #define ROOM_NORMAL 0x2 1243 1244 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off) 1245 { 1246 int idx, len; 1247 1248 len = READ_ONCE(po->rx_ring.frame_max) + 1; 1249 idx = READ_ONCE(po->rx_ring.head); 1250 if (pow_off) 1251 idx += len >> pow_off; 1252 if (idx >= len) 1253 idx -= len; 1254 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1255 } 1256 1257 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off) 1258 { 1259 int idx, len; 1260 1261 len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks); 1262 idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num); 1263 if (pow_off) 1264 idx += len >> pow_off; 1265 if (idx >= len) 1266 idx -= len; 1267 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); 1268 } 1269 1270 static int __packet_rcv_has_room(const struct packet_sock *po, 1271 const struct sk_buff *skb) 1272 { 1273 const struct sock *sk = &po->sk; 1274 int ret = ROOM_NONE; 1275 1276 if (po->prot_hook.func != tpacket_rcv) { 1277 int rcvbuf = READ_ONCE(sk->sk_rcvbuf); 1278 int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc) 1279 - (skb ? 
skb->truesize : 0); 1280 1281 if (avail > (rcvbuf >> ROOM_POW_OFF)) 1282 return ROOM_NORMAL; 1283 else if (avail > 0) 1284 return ROOM_LOW; 1285 else 1286 return ROOM_NONE; 1287 } 1288 1289 if (po->tp_version == TPACKET_V3) { 1290 if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) 1291 ret = ROOM_NORMAL; 1292 else if (__tpacket_v3_has_room(po, 0)) 1293 ret = ROOM_LOW; 1294 } else { 1295 if (__tpacket_has_room(po, ROOM_POW_OFF)) 1296 ret = ROOM_NORMAL; 1297 else if (__tpacket_has_room(po, 0)) 1298 ret = ROOM_LOW; 1299 } 1300 1301 return ret; 1302 } 1303 1304 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) 1305 { 1306 bool pressure; 1307 int ret; 1308 1309 ret = __packet_rcv_has_room(po, skb); 1310 pressure = ret != ROOM_NORMAL; 1311 1312 if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure) 1313 packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure); 1314 1315 return ret; 1316 } 1317 1318 static void packet_rcv_try_clear_pressure(struct packet_sock *po) 1319 { 1320 if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) && 1321 __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) 1322 packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false); 1323 } 1324 1325 static void packet_sock_destruct(struct sock *sk) 1326 { 1327 skb_queue_purge(&sk->sk_error_queue); 1328 1329 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 1330 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); 1331 1332 if (!sock_flag(sk, SOCK_DEAD)) { 1333 pr_err("Attempt to release alive packet socket: %p\n", sk); 1334 return; 1335 } 1336 } 1337 1338 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) 1339 { 1340 u32 *history = po->rollover->history; 1341 u32 victim, rxhash; 1342 int i, count = 0; 1343 1344 rxhash = skb_get_hash(skb); 1345 for (i = 0; i < ROLLOVER_HLEN; i++) 1346 if (READ_ONCE(history[i]) == rxhash) 1347 count++; 1348 1349 victim = get_random_u32_below(ROLLOVER_HLEN); 1350 1351 /* Avoid dirtying the cache line if possible */ 1352 if (READ_ONCE(history[victim]) != rxhash) 1353 WRITE_ONCE(history[victim], rxhash); 1354 1355 return count > (ROLLOVER_HLEN >> 1); 1356 } 1357 1358 static unsigned int fanout_demux_hash(struct packet_fanout *f, 1359 struct sk_buff *skb, 1360 unsigned int num) 1361 { 1362 return reciprocal_scale(__skb_get_hash_symmetric(skb), num); 1363 } 1364 1365 static unsigned int fanout_demux_lb(struct packet_fanout *f, 1366 struct sk_buff *skb, 1367 unsigned int num) 1368 { 1369 unsigned int val = atomic_inc_return(&f->rr_cur); 1370 1371 return val % num; 1372 } 1373 1374 static unsigned int fanout_demux_cpu(struct packet_fanout *f, 1375 struct sk_buff *skb, 1376 unsigned int num) 1377 { 1378 return smp_processor_id() % num; 1379 } 1380 1381 static unsigned int fanout_demux_rnd(struct packet_fanout *f, 1382 struct sk_buff *skb, 1383 unsigned int num) 1384 { 1385 return get_random_u32_below(num); 1386 } 1387 1388 static unsigned int fanout_demux_rollover(struct packet_fanout *f, 1389 struct sk_buff *skb, 1390 unsigned int idx, bool try_self, 1391 unsigned int num) 1392 { 1393 struct packet_sock *po, *po_next, *po_skip = NULL; 1394 unsigned int i, j, room = ROOM_NONE; 1395 1396 po = pkt_sk(rcu_dereference(f->arr[idx])); 1397 1398 if (try_self) { 1399 room = packet_rcv_has_room(po, skb); 1400 if (room == ROOM_NORMAL || 1401 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) 1402 return idx; 1403 po_skip = po; 1404 } 1405 1406 i = j = min_t(int, po->rollover->sock, num - 1); 1407 do { 1408 po_next = pkt_sk(rcu_dereference(f->arr[i])); 1409 if (po_next != po_skip && 1410 
!packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) && 1411 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { 1412 if (i != j) 1413 po->rollover->sock = i; 1414 atomic_long_inc(&po->rollover->num); 1415 if (room == ROOM_LOW) 1416 atomic_long_inc(&po->rollover->num_huge); 1417 return i; 1418 } 1419 1420 if (++i == num) 1421 i = 0; 1422 } while (i != j); 1423 1424 atomic_long_inc(&po->rollover->num_failed); 1425 return idx; 1426 } 1427 1428 static unsigned int fanout_demux_qm(struct packet_fanout *f, 1429 struct sk_buff *skb, 1430 unsigned int num) 1431 { 1432 return skb_get_queue_mapping(skb) % num; 1433 } 1434 1435 static unsigned int fanout_demux_bpf(struct packet_fanout *f, 1436 struct sk_buff *skb, 1437 unsigned int num) 1438 { 1439 struct bpf_prog *prog; 1440 unsigned int ret = 0; 1441 1442 rcu_read_lock(); 1443 prog = rcu_dereference(f->bpf_prog); 1444 if (prog) 1445 ret = bpf_prog_run_clear_cb(prog, skb) % num; 1446 rcu_read_unlock(); 1447 1448 return ret; 1449 } 1450 1451 static bool fanout_has_flag(struct packet_fanout *f, u16 flag) 1452 { 1453 return f->flags & (flag >> 8); 1454 } 1455 1456 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1457 struct packet_type *pt, struct net_device *orig_dev) 1458 { 1459 struct packet_fanout *f = pt->af_packet_priv; 1460 unsigned int num = READ_ONCE(f->num_members); 1461 struct net *net = read_pnet(&f->net); 1462 struct packet_sock *po; 1463 unsigned int idx; 1464 1465 if (!net_eq(dev_net(dev), net) || !num) { 1466 kfree_skb(skb); 1467 return 0; 1468 } 1469 1470 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { 1471 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); 1472 if (!skb) 1473 return 0; 1474 } 1475 switch (f->type) { 1476 case PACKET_FANOUT_HASH: 1477 default: 1478 idx = fanout_demux_hash(f, skb, num); 1479 break; 1480 case PACKET_FANOUT_LB: 1481 idx = fanout_demux_lb(f, skb, num); 1482 break; 1483 case PACKET_FANOUT_CPU: 1484 idx = fanout_demux_cpu(f, skb, num); 1485 break; 1486 case PACKET_FANOUT_RND: 1487 idx = fanout_demux_rnd(f, skb, num); 1488 break; 1489 case PACKET_FANOUT_QM: 1490 idx = fanout_demux_qm(f, skb, num); 1491 break; 1492 case PACKET_FANOUT_ROLLOVER: 1493 idx = fanout_demux_rollover(f, skb, 0, false, num); 1494 break; 1495 case PACKET_FANOUT_CBPF: 1496 case PACKET_FANOUT_EBPF: 1497 idx = fanout_demux_bpf(f, skb, num); 1498 break; 1499 } 1500 1501 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) 1502 idx = fanout_demux_rollover(f, skb, idx, true, num); 1503 1504 po = pkt_sk(rcu_dereference(f->arr[idx])); 1505 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); 1506 } 1507 1508 DEFINE_MUTEX(fanout_mutex); 1509 EXPORT_SYMBOL_GPL(fanout_mutex); 1510 static LIST_HEAD(fanout_list); 1511 static u16 fanout_next_id; 1512 1513 static void __fanout_link(struct sock *sk, struct packet_sock *po) 1514 { 1515 struct packet_fanout *f = po->fanout; 1516 1517 spin_lock(&f->lock); 1518 rcu_assign_pointer(f->arr[f->num_members], sk); 1519 smp_wmb(); 1520 f->num_members++; 1521 if (f->num_members == 1) 1522 dev_add_pack(&f->prot_hook); 1523 spin_unlock(&f->lock); 1524 } 1525 1526 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) 1527 { 1528 struct packet_fanout *f = po->fanout; 1529 int i; 1530 1531 spin_lock(&f->lock); 1532 for (i = 0; i < f->num_members; i++) { 1533 if (rcu_dereference_protected(f->arr[i], 1534 lockdep_is_held(&f->lock)) == sk) 1535 break; 1536 } 1537 BUG_ON(i >= f->num_members); 1538 rcu_assign_pointer(f->arr[i], 1539 
rcu_dereference_protected(f->arr[f->num_members - 1], 1540 lockdep_is_held(&f->lock))); 1541 f->num_members--; 1542 if (f->num_members == 0) 1543 __dev_remove_pack(&f->prot_hook); 1544 spin_unlock(&f->lock); 1545 } 1546 1547 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) 1548 { 1549 if (sk->sk_family != PF_PACKET) 1550 return false; 1551 1552 return ptype->af_packet_priv == pkt_sk(sk)->fanout; 1553 } 1554 1555 static void fanout_init_data(struct packet_fanout *f) 1556 { 1557 switch (f->type) { 1558 case PACKET_FANOUT_LB: 1559 atomic_set(&f->rr_cur, 0); 1560 break; 1561 case PACKET_FANOUT_CBPF: 1562 case PACKET_FANOUT_EBPF: 1563 RCU_INIT_POINTER(f->bpf_prog, NULL); 1564 break; 1565 } 1566 } 1567 1568 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) 1569 { 1570 struct bpf_prog *old; 1571 1572 spin_lock(&f->lock); 1573 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); 1574 rcu_assign_pointer(f->bpf_prog, new); 1575 spin_unlock(&f->lock); 1576 1577 if (old) { 1578 synchronize_net(); 1579 bpf_prog_destroy(old); 1580 } 1581 } 1582 1583 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, 1584 unsigned int len) 1585 { 1586 struct bpf_prog *new; 1587 struct sock_fprog fprog; 1588 int ret; 1589 1590 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1591 return -EPERM; 1592 1593 ret = copy_bpf_fprog_from_user(&fprog, data, len); 1594 if (ret) 1595 return ret; 1596 1597 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); 1598 if (ret) 1599 return ret; 1600 1601 __fanout_set_data_bpf(po->fanout, new); 1602 return 0; 1603 } 1604 1605 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data, 1606 unsigned int len) 1607 { 1608 struct bpf_prog *new; 1609 u32 fd; 1610 1611 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) 1612 return -EPERM; 1613 if (len != sizeof(fd)) 1614 return -EINVAL; 1615 if (copy_from_sockptr(&fd, data, len)) 1616 return -EFAULT; 1617 1618 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 1619 if (IS_ERR(new)) 1620 return PTR_ERR(new); 1621 1622 __fanout_set_data_bpf(po->fanout, new); 1623 return 0; 1624 } 1625 1626 static int fanout_set_data(struct packet_sock *po, sockptr_t data, 1627 unsigned int len) 1628 { 1629 switch (po->fanout->type) { 1630 case PACKET_FANOUT_CBPF: 1631 return fanout_set_data_cbpf(po, data, len); 1632 case PACKET_FANOUT_EBPF: 1633 return fanout_set_data_ebpf(po, data, len); 1634 default: 1635 return -EINVAL; 1636 } 1637 } 1638 1639 static void fanout_release_data(struct packet_fanout *f) 1640 { 1641 switch (f->type) { 1642 case PACKET_FANOUT_CBPF: 1643 case PACKET_FANOUT_EBPF: 1644 __fanout_set_data_bpf(f, NULL); 1645 } 1646 } 1647 1648 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) 1649 { 1650 struct packet_fanout *f; 1651 1652 list_for_each_entry(f, &fanout_list, list) { 1653 if (f->id == candidate_id && 1654 read_pnet(&f->net) == sock_net(sk)) { 1655 return false; 1656 } 1657 } 1658 return true; 1659 } 1660 1661 static bool fanout_find_new_id(struct sock *sk, u16 *new_id) 1662 { 1663 u16 id = fanout_next_id; 1664 1665 do { 1666 if (__fanout_id_is_free(sk, id)) { 1667 *new_id = id; 1668 fanout_next_id = id + 1; 1669 return true; 1670 } 1671 1672 id++; 1673 } while (id != fanout_next_id); 1674 1675 return false; 1676 } 1677 1678 static int fanout_add(struct sock *sk, struct fanout_args *args) 1679 { 1680 struct packet_rollover *rollover = NULL; 1681 struct packet_sock *po = pkt_sk(sk); 1682 u16 type_flags = 
args->type_flags; 1683 struct packet_fanout *f, *match; 1684 u8 type = type_flags & 0xff; 1685 u8 flags = type_flags >> 8; 1686 u16 id = args->id; 1687 int err; 1688 1689 switch (type) { 1690 case PACKET_FANOUT_ROLLOVER: 1691 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) 1692 return -EINVAL; 1693 break; 1694 case PACKET_FANOUT_HASH: 1695 case PACKET_FANOUT_LB: 1696 case PACKET_FANOUT_CPU: 1697 case PACKET_FANOUT_RND: 1698 case PACKET_FANOUT_QM: 1699 case PACKET_FANOUT_CBPF: 1700 case PACKET_FANOUT_EBPF: 1701 break; 1702 default: 1703 return -EINVAL; 1704 } 1705 1706 mutex_lock(&fanout_mutex); 1707 1708 err = -EALREADY; 1709 if (po->fanout) 1710 goto out; 1711 1712 if (type == PACKET_FANOUT_ROLLOVER || 1713 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { 1714 err = -ENOMEM; 1715 rollover = kzalloc_obj(*rollover); 1716 if (!rollover) 1717 goto out; 1718 atomic_long_set(&rollover->num, 0); 1719 atomic_long_set(&rollover->num_huge, 0); 1720 atomic_long_set(&rollover->num_failed, 0); 1721 } 1722 1723 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { 1724 if (id != 0) { 1725 err = -EINVAL; 1726 goto out; 1727 } 1728 if (!fanout_find_new_id(sk, &id)) { 1729 err = -ENOMEM; 1730 goto out; 1731 } 1732 /* ephemeral flag for the first socket in the group: drop it */ 1733 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); 1734 } 1735 1736 match = NULL; 1737 list_for_each_entry(f, &fanout_list, list) { 1738 if (f->id == id && 1739 read_pnet(&f->net) == sock_net(sk)) { 1740 match = f; 1741 break; 1742 } 1743 } 1744 err = -EINVAL; 1745 if (match) { 1746 if (match->flags != flags) 1747 goto out; 1748 if (args->max_num_members && 1749 args->max_num_members != match->max_num_members) 1750 goto out; 1751 } else { 1752 if (args->max_num_members > PACKET_FANOUT_MAX) 1753 goto out; 1754 if (!args->max_num_members) 1755 /* legacy PACKET_FANOUT_MAX */ 1756 args->max_num_members = 256; 1757 err = -ENOMEM; 1758 match = kvzalloc_flex(*match, arr, args->max_num_members); 1759 if (!match) 1760 goto out; 1761 write_pnet(&match->net, sock_net(sk)); 1762 match->id = id; 1763 match->type = type; 1764 match->flags = flags; 1765 INIT_LIST_HEAD(&match->list); 1766 spin_lock_init(&match->lock); 1767 refcount_set(&match->sk_ref, 0); 1768 fanout_init_data(match); 1769 match->prot_hook.type = po->prot_hook.type; 1770 match->prot_hook.dev = po->prot_hook.dev; 1771 match->prot_hook.func = packet_rcv_fanout; 1772 match->prot_hook.af_packet_priv = match; 1773 match->prot_hook.af_packet_net = read_pnet(&match->net); 1774 match->prot_hook.id_match = match_fanout_group; 1775 match->max_num_members = args->max_num_members; 1776 match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING; 1777 list_add(&match->list, &fanout_list); 1778 } 1779 err = -EINVAL; 1780 1781 spin_lock(&po->bind_lock); 1782 if (po->num && 1783 match->type == type && 1784 match->prot_hook.type == po->prot_hook.type && 1785 match->prot_hook.dev == po->prot_hook.dev) { 1786 err = -ENOSPC; 1787 if (refcount_read(&match->sk_ref) < match->max_num_members) { 1788 /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */ 1789 WRITE_ONCE(po->fanout, match); 1790 1791 po->rollover = rollover; 1792 rollover = NULL; 1793 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); 1794 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) { 1795 __dev_remove_pack(&po->prot_hook); 1796 __fanout_link(sk, po); 1797 } 1798 err = 0; 1799 } 1800 } 1801 spin_unlock(&po->bind_lock); 1802 1803 if (err && !refcount_read(&match->sk_ref)) { 1804 list_del(&match->list); 1805 
kvfree(match); 1806 } 1807 1808 out: 1809 kfree(rollover); 1810 mutex_unlock(&fanout_mutex); 1811 return err; 1812 } 1813 1814 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes 1815 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. 1816 * It is the responsibility of the caller to call fanout_release_data() and 1817 * free the returned packet_fanout (after synchronize_net()) 1818 */ 1819 static struct packet_fanout *fanout_release(struct sock *sk) 1820 { 1821 struct packet_sock *po = pkt_sk(sk); 1822 struct packet_fanout *f; 1823 1824 mutex_lock(&fanout_mutex); 1825 f = po->fanout; 1826 if (f) { 1827 po->fanout = NULL; 1828 1829 if (refcount_dec_and_test(&f->sk_ref)) 1830 list_del(&f->list); 1831 else 1832 f = NULL; 1833 } 1834 mutex_unlock(&fanout_mutex); 1835 1836 return f; 1837 } 1838 1839 static bool packet_extra_vlan_len_allowed(const struct net_device *dev, 1840 struct sk_buff *skb) 1841 { 1842 /* Earlier code assumed this would be a VLAN pkt, double-check 1843 * this now that we have the actual packet in hand. We can only 1844 * do this check on Ethernet devices. 1845 */ 1846 if (unlikely(dev->type != ARPHRD_ETHER)) 1847 return false; 1848 1849 skb_reset_mac_header(skb); 1850 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); 1851 } 1852 1853 static const struct proto_ops packet_ops; 1854 1855 static const struct proto_ops packet_ops_spkt; 1856 1857 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, 1858 struct packet_type *pt, struct net_device *orig_dev) 1859 { 1860 struct sock *sk; 1861 struct sockaddr_pkt *spkt; 1862 1863 /* 1864 * When we registered the protocol we saved the socket in the data 1865 * field for just this event. 1866 */ 1867 1868 sk = pt->af_packet_priv; 1869 1870 /* 1871 * Yank back the headers [hope the device set this 1872 * right or kerboom...] 1873 * 1874 * Incoming packets have ll header pulled, 1875 * push it back. 1876 * 1877 * For outgoing ones skb->data == skb_mac_header(skb) 1878 * so that this procedure is noop. 1879 */ 1880 1881 if (skb->pkt_type == PACKET_LOOPBACK) 1882 goto out; 1883 1884 if (!net_eq(dev_net(dev), sock_net(sk))) 1885 goto out; 1886 1887 skb = skb_share_check(skb, GFP_ATOMIC); 1888 if (skb == NULL) 1889 goto oom; 1890 1891 /* drop any routing info */ 1892 skb_dst_drop(skb); 1893 1894 /* drop conntrack reference */ 1895 nf_reset_ct(skb); 1896 1897 spkt = &PACKET_SKB_CB(skb)->sa.pkt; 1898 1899 skb_push(skb, skb->data - skb_mac_header(skb)); 1900 1901 /* 1902 * The SOCK_PACKET socket receives _all_ frames. 1903 */ 1904 1905 spkt->spkt_family = dev->type; 1906 strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); 1907 spkt->spkt_protocol = skb->protocol; 1908 1909 /* 1910 * Charge the memory to the socket. This is done specifically 1911 * to prevent sockets using all the memory up. 
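 * sock_queue_rcv_skb() refuses the skb once sk_rmem_alloc plus the
 * skb's truesize would exceed sk_rcvbuf; in that case it is freed at
 * the out: label below.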
1912 */ 1913 1914 if (sock_queue_rcv_skb(sk, skb) == 0) 1915 return 0; 1916 1917 out: 1918 kfree_skb(skb); 1919 oom: 1920 return 0; 1921 } 1922 1923 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) 1924 { 1925 int depth; 1926 1927 if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) && 1928 sock->type == SOCK_RAW) { 1929 skb_reset_mac_header(skb); 1930 skb->protocol = dev_parse_header_protocol(skb); 1931 } 1932 1933 /* Move network header to the right position for VLAN tagged packets */ 1934 if (likely(skb->dev->type == ARPHRD_ETHER) && 1935 eth_type_vlan(skb->protocol) && 1936 vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0) 1937 skb_set_network_header(skb, depth); 1938 1939 skb_probe_transport_header(skb); 1940 } 1941 1942 /* 1943 * Output a raw packet to a device layer. This bypasses all the other 1944 * protocol layers and you must therefore supply it with a complete frame 1945 */ 1946 1947 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, 1948 size_t len) 1949 { 1950 struct sock *sk = sock->sk; 1951 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); 1952 struct sk_buff *skb = NULL; 1953 struct net_device *dev; 1954 struct sockcm_cookie sockc; 1955 __be16 proto = 0; 1956 int err; 1957 int extra_len = 0; 1958 1959 /* 1960 * Get and verify the address. 1961 */ 1962 1963 if (saddr) { 1964 if (msg->msg_namelen < sizeof(struct sockaddr)) 1965 return -EINVAL; 1966 if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) 1967 proto = saddr->spkt_protocol; 1968 } else 1969 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ 1970 1971 /* 1972 * Find the device first to size check it 1973 */ 1974 1975 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; 1976 retry: 1977 rcu_read_lock(); 1978 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 1979 err = -ENODEV; 1980 if (dev == NULL) 1981 goto out_unlock; 1982 1983 err = -ENETDOWN; 1984 if (!(dev->flags & IFF_UP)) 1985 goto out_unlock; 1986 1987 /* 1988 * You may not queue a frame bigger than the mtu. This is the lowest level 1989 * raw protocol and you must do your own fragmentation at this level. 1990 */ 1991 1992 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 1993 if (!netif_supports_nofcs(dev)) { 1994 err = -EPROTONOSUPPORT; 1995 goto out_unlock; 1996 } 1997 extra_len = 4; /* We're doing our own CRC */ 1998 } 1999 2000 err = -EMSGSIZE; 2001 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) 2002 goto out_unlock; 2003 2004 if (!skb) { 2005 size_t reserved = LL_RESERVED_SPACE(dev); 2006 int tlen = dev->needed_tailroom; 2007 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; 2008 2009 rcu_read_unlock(); 2010 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); 2011 if (skb == NULL) 2012 return -ENOBUFS; 2013 /* FIXME: Save some space for broken drivers that write a hard 2014 * header at transmission time by themselves. PPP is the notable 2015 * one here. This should really be fixed at the driver level. 
2016 */ 2017 skb_reserve(skb, reserved); 2018 skb_reset_network_header(skb); 2019 2020 /* Try to align data part correctly */ 2021 if (hhlen) { 2022 skb->data -= hhlen; 2023 skb->tail -= hhlen; 2024 if (len < hhlen) 2025 skb_reset_network_header(skb); 2026 } 2027 err = memcpy_from_msg(skb_put(skb, len), msg, len); 2028 if (err) 2029 goto out_free; 2030 goto retry; 2031 } 2032 2033 if (!dev_validate_header(dev, skb->data, len) || !skb->len) { 2034 err = -EINVAL; 2035 goto out_unlock; 2036 } 2037 if (len > (dev->mtu + dev->hard_header_len + extra_len) && 2038 !packet_extra_vlan_len_allowed(dev, skb)) { 2039 err = -EMSGSIZE; 2040 goto out_unlock; 2041 } 2042 2043 sockcm_init(&sockc, sk); 2044 if (msg->msg_controllen) { 2045 err = sock_cmsg_send(sk, msg, &sockc); 2046 if (unlikely(err)) 2047 goto out_unlock; 2048 } 2049 2050 skb->protocol = proto; 2051 skb->dev = dev; 2052 skb->priority = sockc.priority; 2053 skb->mark = sockc.mark; 2054 skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid); 2055 skb_setup_tx_timestamp(skb, &sockc); 2056 2057 if (unlikely(extra_len == 4)) 2058 skb->no_fcs = 1; 2059 2060 packet_parse_headers(skb, sock); 2061 2062 dev_queue_xmit(skb); 2063 rcu_read_unlock(); 2064 return len; 2065 2066 out_unlock: 2067 rcu_read_unlock(); 2068 out_free: 2069 kfree_skb(skb); 2070 return err; 2071 } 2072 2073 static unsigned int run_filter(struct sk_buff *skb, 2074 const struct sock *sk, 2075 unsigned int res) 2076 { 2077 struct sk_filter *filter; 2078 2079 rcu_read_lock(); 2080 filter = rcu_dereference(sk->sk_filter); 2081 if (filter != NULL) 2082 res = bpf_prog_run_clear_cb(filter->prog, skb); 2083 rcu_read_unlock(); 2084 2085 return res; 2086 } 2087 2088 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, 2089 size_t *len, int vnet_hdr_sz) 2090 { 2091 struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 }; 2092 2093 if (*len < vnet_hdr_sz) 2094 return -EINVAL; 2095 *len -= vnet_hdr_sz; 2096 2097 if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0)) 2098 return -EINVAL; 2099 2100 return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz); 2101 } 2102 2103 /* 2104 * This function makes lazy skb cloning in hope that most of packets 2105 * are discarded by BPF. 2106 * 2107 * Note tricky part: we DO mangle shared skb! skb->data, skb->len 2108 * and skb->cb are mangled. It works because (and until) packets 2109 * falling here are owned by current CPU. Output packets are cloned 2110 * by dev_queue_xmit_nit(), input packets are processed by net_bh 2111 * sequentially, so that if we return skb to original state on exit, 2112 * we will not harm anyone. 2113 */ 2114 2115 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, 2116 struct packet_type *pt, struct net_device *orig_dev) 2117 { 2118 enum skb_drop_reason drop_reason = SKB_CONSUMED; 2119 struct sock *sk = NULL; 2120 struct sockaddr_ll *sll; 2121 struct packet_sock *po; 2122 u8 *skb_head = skb->data; 2123 int skb_len = skb->len; 2124 unsigned int snaplen, res; 2125 2126 if (skb->pkt_type == PACKET_LOOPBACK) 2127 goto drop; 2128 2129 sk = pt->af_packet_priv; 2130 po = pkt_sk(sk); 2131 2132 if (!net_eq(dev_net(dev), sock_net(sk))) 2133 goto drop; 2134 2135 skb->dev = dev; 2136 2137 if (dev_has_header(dev)) { 2138 /* The device has an explicit notion of ll header, 2139 * exported to higher levels. 
2140 * 2141 * Otherwise, the device hides details of its frame 2142 * structure, so that corresponding packet head is 2143 * never delivered to user. 2144 */ 2145 if (sk->sk_type != SOCK_DGRAM) 2146 skb_push(skb, skb->data - skb_mac_header(skb)); 2147 else if (skb->pkt_type == PACKET_OUTGOING) { 2148 /* Special case: outgoing packets have ll header at head */ 2149 skb_pull(skb, skb_network_offset(skb)); 2150 } 2151 } 2152 2153 snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb); 2154 2155 res = run_filter(skb, sk, snaplen); 2156 if (!res) 2157 goto drop_n_restore; 2158 if (snaplen > res) 2159 snaplen = res; 2160 2161 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 2162 goto drop_n_acct; 2163 2164 if (skb_shared(skb)) { 2165 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 2166 if (nskb == NULL) 2167 goto drop_n_acct; 2168 2169 if (skb_head != skb->data) { 2170 skb->data = skb_head; 2171 skb->len = skb_len; 2172 } 2173 consume_skb(skb); 2174 skb = nskb; 2175 } 2176 2177 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); 2178 2179 sll = &PACKET_SKB_CB(skb)->sa.ll; 2180 sll->sll_hatype = dev->type; 2181 sll->sll_pkttype = skb->pkt_type; 2182 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) 2183 sll->sll_ifindex = orig_dev->ifindex; 2184 else 2185 sll->sll_ifindex = dev->ifindex; 2186 2187 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2188 2189 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). 2190 * Use their space for storing the original skb length. 2191 */ 2192 PACKET_SKB_CB(skb)->sa.origlen = skb->len; 2193 2194 if (pskb_trim(skb, snaplen)) 2195 goto drop_n_acct; 2196 2197 skb_set_owner_r(skb, sk); 2198 skb->dev = NULL; 2199 skb_dst_drop(skb); 2200 2201 /* drop conntrack reference */ 2202 nf_reset_ct(skb); 2203 2204 spin_lock(&sk->sk_receive_queue.lock); 2205 po->stats.stats1.tp_packets++; 2206 sock_skb_set_dropcount(sk, skb); 2207 skb_clear_delivery_time(skb); 2208 __skb_queue_tail(&sk->sk_receive_queue, skb); 2209 spin_unlock(&sk->sk_receive_queue.lock); 2210 sk->sk_data_ready(sk); 2211 return 0; 2212 2213 drop_n_acct: 2214 atomic_inc(&po->tp_drops); 2215 sk_drops_inc(sk); 2216 drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; 2217 2218 drop_n_restore: 2219 if (skb_head != skb->data && skb_shared(skb)) { 2220 skb->data = skb_head; 2221 skb->len = skb_len; 2222 } 2223 drop: 2224 sk_skb_reason_drop(sk, skb, drop_reason); 2225 return 0; 2226 } 2227 2228 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 2229 struct packet_type *pt, struct net_device *orig_dev) 2230 { 2231 enum skb_drop_reason drop_reason = SKB_CONSUMED; 2232 struct sock *sk = NULL; 2233 struct packet_sock *po; 2234 struct sockaddr_ll *sll; 2235 union tpacket_uhdr h; 2236 u8 *skb_head = skb->data; 2237 int skb_len = skb->len; 2238 unsigned int snaplen, res; 2239 unsigned long status = TP_STATUS_USER; 2240 unsigned short macoff, hdrlen; 2241 unsigned int netoff; 2242 struct sk_buff *copy_skb = NULL; 2243 struct timespec64 ts; 2244 __u32 ts_status; 2245 unsigned int slot_id = 0; 2246 int vnet_hdr_sz = 0; 2247 2248 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. 2249 * We may add members to them until current aligned size without forcing 2250 * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
2251 */ 2252 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); 2253 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); 2254 2255 if (skb->pkt_type == PACKET_LOOPBACK) 2256 goto drop; 2257 2258 sk = pt->af_packet_priv; 2259 po = pkt_sk(sk); 2260 2261 if (!net_eq(dev_net(dev), sock_net(sk))) 2262 goto drop; 2263 2264 if (dev_has_header(dev)) { 2265 if (sk->sk_type != SOCK_DGRAM) 2266 skb_push(skb, skb->data - skb_mac_header(skb)); 2267 else if (skb->pkt_type == PACKET_OUTGOING) { 2268 /* Special case: outgoing packets have ll header at head */ 2269 skb_pull(skb, skb_network_offset(skb)); 2270 } 2271 } 2272 2273 snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb); 2274 2275 res = run_filter(skb, sk, snaplen); 2276 if (!res) 2277 goto drop_n_restore; 2278 2279 /* If we are flooded, just give up */ 2280 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) { 2281 atomic_inc(&po->tp_drops); 2282 goto drop_n_restore; 2283 } 2284 2285 if (skb->ip_summed == CHECKSUM_PARTIAL) 2286 status |= TP_STATUS_CSUMNOTREADY; 2287 else if (skb->pkt_type != PACKET_OUTGOING && 2288 skb_csum_unnecessary(skb)) 2289 status |= TP_STATUS_CSUM_VALID; 2290 if (skb_is_gso(skb) && skb_is_gso_tcp(skb)) 2291 status |= TP_STATUS_GSO_TCP; 2292 2293 if (snaplen > res) 2294 snaplen = res; 2295 2296 if (sk->sk_type == SOCK_DGRAM) { 2297 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 2298 po->tp_reserve; 2299 } else { 2300 unsigned int maclen = skb_network_offset(skb); 2301 netoff = TPACKET_ALIGN(po->tp_hdrlen + 2302 (maclen < 16 ? 16 : maclen)) + 2303 po->tp_reserve; 2304 vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); 2305 if (vnet_hdr_sz) 2306 netoff += vnet_hdr_sz; 2307 macoff = netoff - maclen; 2308 } 2309 if (netoff > USHRT_MAX) { 2310 atomic_inc(&po->tp_drops); 2311 goto drop_n_restore; 2312 } 2313 if (po->tp_version <= TPACKET_V2) { 2314 if (macoff + snaplen > po->rx_ring.frame_size) { 2315 if (READ_ONCE(po->copy_thresh) && 2316 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 2317 if (skb_shared(skb)) { 2318 copy_skb = skb_clone(skb, GFP_ATOMIC); 2319 } else { 2320 copy_skb = skb_get(skb); 2321 skb_head = skb->data; 2322 } 2323 if (copy_skb) { 2324 memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0, 2325 sizeof(PACKET_SKB_CB(copy_skb)->sa.ll)); 2326 skb_set_owner_r(copy_skb, sk); 2327 } 2328 } 2329 snaplen = po->rx_ring.frame_size - macoff; 2330 if ((int)snaplen < 0) { 2331 snaplen = 0; 2332 vnet_hdr_sz = 0; 2333 } 2334 } 2335 } else if (unlikely(macoff + snaplen > 2336 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { 2337 u32 nval; 2338 2339 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; 2340 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. 
macoff=%u\n", 2341 snaplen, nval, macoff); 2342 snaplen = nval; 2343 if (unlikely((int)snaplen < 0)) { 2344 snaplen = 0; 2345 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; 2346 vnet_hdr_sz = 0; 2347 } 2348 } 2349 spin_lock(&sk->sk_receive_queue.lock); 2350 h.raw = packet_current_rx_frame(po, skb, 2351 TP_STATUS_KERNEL, (macoff+snaplen)); 2352 if (!h.raw) 2353 goto drop_n_account; 2354 2355 if (po->tp_version <= TPACKET_V2) { 2356 slot_id = po->rx_ring.head; 2357 if (test_bit(slot_id, po->rx_ring.rx_owner_map)) 2358 goto drop_n_account; 2359 __set_bit(slot_id, po->rx_ring.rx_owner_map); 2360 } 2361 2362 if (vnet_hdr_sz && 2363 virtio_net_hdr_from_skb(skb, h.raw + macoff - 2364 sizeof(struct virtio_net_hdr), 2365 vio_le(), true, 0)) { 2366 if (po->tp_version == TPACKET_V3) 2367 prb_clear_blk_fill_status(&po->rx_ring); 2368 goto drop_n_account; 2369 } 2370 2371 if (po->tp_version <= TPACKET_V2) { 2372 packet_increment_rx_head(po, &po->rx_ring); 2373 /* 2374 * LOSING will be reported till you read the stats, 2375 * because it's COR - Clear On Read. 2376 * Anyways, moving it for V1/V2 only as V3 doesn't need this 2377 * at packet level. 2378 */ 2379 if (atomic_read(&po->tp_drops)) 2380 status |= TP_STATUS_LOSING; 2381 } 2382 2383 po->stats.stats1.tp_packets++; 2384 if (copy_skb) { 2385 status |= TP_STATUS_COPY; 2386 skb_clear_delivery_time(copy_skb); 2387 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); 2388 } 2389 spin_unlock(&sk->sk_receive_queue.lock); 2390 2391 skb_copy_bits(skb, 0, h.raw + macoff, snaplen); 2392 2393 /* Always timestamp; prefer an existing software timestamp taken 2394 * closer to the time of capture. 2395 */ 2396 ts_status = tpacket_get_timestamp(skb, &ts, 2397 READ_ONCE(po->tp_tstamp) | 2398 SOF_TIMESTAMPING_SOFTWARE); 2399 if (!ts_status) 2400 ktime_get_real_ts64(&ts); 2401 2402 status |= ts_status; 2403 2404 switch (po->tp_version) { 2405 case TPACKET_V1: 2406 h.h1->tp_len = skb->len; 2407 h.h1->tp_snaplen = snaplen; 2408 h.h1->tp_mac = macoff; 2409 h.h1->tp_net = netoff; 2410 h.h1->tp_sec = ts.tv_sec; 2411 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; 2412 hdrlen = sizeof(*h.h1); 2413 break; 2414 case TPACKET_V2: 2415 h.h2->tp_len = skb->len; 2416 h.h2->tp_snaplen = snaplen; 2417 h.h2->tp_mac = macoff; 2418 h.h2->tp_net = netoff; 2419 h.h2->tp_sec = ts.tv_sec; 2420 h.h2->tp_nsec = ts.tv_nsec; 2421 if (skb_vlan_tag_present(skb)) { 2422 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); 2423 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2424 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2425 } else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { 2426 h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev); 2427 h.h2->tp_vlan_tpid = ntohs(skb->protocol); 2428 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2429 } else { 2430 h.h2->tp_vlan_tci = 0; 2431 h.h2->tp_vlan_tpid = 0; 2432 } 2433 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); 2434 hdrlen = sizeof(*h.h2); 2435 break; 2436 case TPACKET_V3: 2437 /* tp_nxt_offset,vlan are already populated above. 
2438 * So DONT clear those fields here 2439 */ 2440 h.h3->tp_status |= status; 2441 h.h3->tp_len = skb->len; 2442 h.h3->tp_snaplen = snaplen; 2443 h.h3->tp_mac = macoff; 2444 h.h3->tp_net = netoff; 2445 h.h3->tp_sec = ts.tv_sec; 2446 h.h3->tp_nsec = ts.tv_nsec; 2447 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); 2448 hdrlen = sizeof(*h.h3); 2449 break; 2450 default: 2451 BUG(); 2452 } 2453 2454 sll = h.raw + TPACKET_ALIGN(hdrlen); 2455 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2456 sll->sll_family = AF_PACKET; 2457 sll->sll_hatype = dev->type; 2458 sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ? 2459 vlan_get_protocol_dgram(skb) : skb->protocol; 2460 sll->sll_pkttype = skb->pkt_type; 2461 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) 2462 sll->sll_ifindex = orig_dev->ifindex; 2463 else 2464 sll->sll_ifindex = dev->ifindex; 2465 2466 smp_mb(); 2467 2468 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 2469 if (po->tp_version <= TPACKET_V2) { 2470 u8 *start, *end; 2471 2472 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + 2473 macoff + snaplen); 2474 2475 for (start = h.raw; start < end; start += PAGE_SIZE) 2476 flush_dcache_page(pgv_to_page(start)); 2477 } 2478 smp_wmb(); 2479 #endif 2480 2481 if (po->tp_version <= TPACKET_V2) { 2482 spin_lock(&sk->sk_receive_queue.lock); 2483 __packet_set_status(po, h.raw, status); 2484 __clear_bit(slot_id, po->rx_ring.rx_owner_map); 2485 spin_unlock(&sk->sk_receive_queue.lock); 2486 sk->sk_data_ready(sk); 2487 } else if (po->tp_version == TPACKET_V3) { 2488 prb_clear_blk_fill_status(&po->rx_ring); 2489 } 2490 2491 drop_n_restore: 2492 if (skb_head != skb->data && skb_shared(skb)) { 2493 skb->data = skb_head; 2494 skb->len = skb_len; 2495 } 2496 drop: 2497 sk_skb_reason_drop(sk, skb, drop_reason); 2498 return 0; 2499 2500 drop_n_account: 2501 spin_unlock(&sk->sk_receive_queue.lock); 2502 atomic_inc(&po->tp_drops); 2503 drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; 2504 2505 sk->sk_data_ready(sk); 2506 sk_skb_reason_drop(sk, copy_skb, drop_reason); 2507 goto drop_n_restore; 2508 } 2509 2510 static void tpacket_destruct_skb(struct sk_buff *skb) 2511 { 2512 struct packet_sock *po = pkt_sk(skb->sk); 2513 2514 if (likely(po->tx_ring.pg_vec)) { 2515 void *ph; 2516 __u32 ts; 2517 2518 ph = skb_zcopy_get_nouarg(skb); 2519 packet_dec_pending(&po->tx_ring); 2520 2521 ts = __packet_set_timestamp(po, ph, skb); 2522 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2523 2524 complete(&po->skb_completion); 2525 } 2526 2527 sock_wfree(skb); 2528 } 2529 2530 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) 2531 { 2532 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2533 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2534 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > 2535 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) 2536 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), 2537 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + 2538 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); 2539 2540 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) 2541 return -EINVAL; 2542 2543 return 0; 2544 } 2545 2546 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, 2547 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz) 2548 { 2549 int ret; 2550 2551 if (*len < vnet_hdr_sz) 2552 return -EINVAL; 2553 *len -= vnet_hdr_sz; 2554 2555 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) 2556 return -EFAULT; 2557 2558 ret = __packet_snd_vnet_parse(vnet_hdr, 
*len); 2559 if (ret) 2560 return ret; 2561 2562 /* move iter to point to the start of mac header */ 2563 if (vnet_hdr_sz != sizeof(struct virtio_net_hdr)) 2564 iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr)); 2565 2566 return 0; 2567 } 2568 2569 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2570 void *frame, struct net_device *dev, void *data, int tp_len, 2571 __be16 proto, unsigned char *addr, int hlen, int copylen, 2572 const struct sockcm_cookie *sockc) 2573 { 2574 union tpacket_uhdr ph; 2575 int to_write, offset, len, nr_frags, len_max; 2576 struct socket *sock = po->sk.sk_socket; 2577 struct page *page; 2578 int err; 2579 2580 ph.raw = frame; 2581 2582 skb->protocol = proto; 2583 skb->dev = dev; 2584 skb->priority = sockc->priority; 2585 skb->mark = sockc->mark; 2586 skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid); 2587 skb_setup_tx_timestamp(skb, sockc); 2588 skb_zcopy_set_nouarg(skb, ph.raw); 2589 2590 skb_reserve(skb, hlen); 2591 skb_reset_network_header(skb); 2592 2593 to_write = tp_len; 2594 2595 if (sock->type == SOCK_DGRAM) { 2596 err = dev_hard_header(skb, dev, ntohs(proto), addr, 2597 NULL, tp_len); 2598 if (unlikely(err < 0)) 2599 return -EINVAL; 2600 } else if (copylen) { 2601 int hdrlen = min_t(int, copylen, tp_len); 2602 2603 skb_push(skb, dev->hard_header_len); 2604 skb_put(skb, copylen - dev->hard_header_len); 2605 err = skb_store_bits(skb, 0, data, hdrlen); 2606 if (unlikely(err)) 2607 return err; 2608 if (!dev_validate_header(dev, skb->data, hdrlen)) 2609 return -EINVAL; 2610 2611 data += hdrlen; 2612 to_write -= hdrlen; 2613 } 2614 2615 offset = offset_in_page(data); 2616 len_max = PAGE_SIZE - offset; 2617 len = ((to_write > len_max) ? len_max : to_write); 2618 2619 skb->data_len = to_write; 2620 skb->len += to_write; 2621 skb->truesize += to_write; 2622 refcount_add(to_write, &po->sk.sk_wmem_alloc); 2623 2624 while (likely(to_write)) { 2625 nr_frags = skb_shinfo(skb)->nr_frags; 2626 2627 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { 2628 pr_err("Packet exceed the number of skb frags(%u)\n", 2629 (unsigned int)MAX_SKB_FRAGS); 2630 return -EFAULT; 2631 } 2632 2633 page = pgv_to_page(data); 2634 data += len; 2635 flush_dcache_page(page); 2636 get_page(page); 2637 skb_fill_page_desc(skb, nr_frags, page, offset, len); 2638 to_write -= len; 2639 offset = 0; 2640 len_max = PAGE_SIZE; 2641 len = ((to_write > len_max) ? 
len_max : to_write); 2642 } 2643 2644 packet_parse_headers(skb, sock); 2645 2646 return tp_len; 2647 } 2648 2649 static int tpacket_parse_header(struct packet_sock *po, void *frame, 2650 int size_max, void **data) 2651 { 2652 union tpacket_uhdr ph; 2653 int tp_len, off; 2654 2655 ph.raw = frame; 2656 2657 switch (po->tp_version) { 2658 case TPACKET_V3: 2659 if (ph.h3->tp_next_offset != 0) { 2660 pr_warn_once("variable sized slot not supported"); 2661 return -EINVAL; 2662 } 2663 tp_len = ph.h3->tp_len; 2664 break; 2665 case TPACKET_V2: 2666 tp_len = ph.h2->tp_len; 2667 break; 2668 default: 2669 tp_len = ph.h1->tp_len; 2670 break; 2671 } 2672 if (unlikely(tp_len > size_max)) { 2673 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); 2674 return -EMSGSIZE; 2675 } 2676 2677 if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) { 2678 int off_min, off_max; 2679 2680 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2681 off_max = po->tx_ring.frame_size - tp_len; 2682 if (po->sk.sk_type == SOCK_DGRAM) { 2683 switch (po->tp_version) { 2684 case TPACKET_V3: 2685 off = ph.h3->tp_net; 2686 break; 2687 case TPACKET_V2: 2688 off = ph.h2->tp_net; 2689 break; 2690 default: 2691 off = ph.h1->tp_net; 2692 break; 2693 } 2694 } else { 2695 switch (po->tp_version) { 2696 case TPACKET_V3: 2697 off = ph.h3->tp_mac; 2698 break; 2699 case TPACKET_V2: 2700 off = ph.h2->tp_mac; 2701 break; 2702 default: 2703 off = ph.h1->tp_mac; 2704 break; 2705 } 2706 } 2707 if (unlikely((off < off_min) || (off_max < off))) 2708 return -EINVAL; 2709 } else { 2710 off = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2711 } 2712 2713 *data = frame + off; 2714 return tp_len; 2715 } 2716 2717 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2718 { 2719 struct sk_buff *skb = NULL; 2720 struct net_device *dev; 2721 struct virtio_net_hdr *vnet_hdr = NULL; 2722 struct sockcm_cookie sockc; 2723 __be16 proto; 2724 int err, reserve = 0; 2725 void *ph; 2726 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2727 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2728 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); 2729 unsigned char *addr = NULL; 2730 int tp_len, size_max; 2731 void *data; 2732 int len_sum = 0; 2733 int status = TP_STATUS_AVAILABLE; 2734 int hlen, tlen, copylen = 0; 2735 long timeo; 2736 2737 mutex_lock(&po->pg_vec_lock); 2738 2739 /* packet_sendmsg() check on tx_ring.pg_vec was lockless, 2740 * we need to confirm it under protection of pg_vec_lock. 
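	 * packet_set_ring() likewise takes pg_vec_lock before swapping a ring
	 * in or out, so once the mutex is held the pg_vec pointer tested
	 * below cannot change under us.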
2741 */ 2742 if (unlikely(!po->tx_ring.pg_vec)) { 2743 err = -EBUSY; 2744 goto out; 2745 } 2746 if (likely(saddr == NULL)) { 2747 dev = packet_cached_dev_get(po); 2748 proto = READ_ONCE(po->num); 2749 } else { 2750 err = -EINVAL; 2751 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2752 goto out; 2753 if (msg->msg_namelen < (saddr->sll_halen 2754 + offsetof(struct sockaddr_ll, 2755 sll_addr))) 2756 goto out; 2757 proto = saddr->sll_protocol; 2758 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2759 if (po->sk.sk_socket->type == SOCK_DGRAM) { 2760 if (dev && msg->msg_namelen < dev->addr_len + 2761 offsetof(struct sockaddr_ll, sll_addr)) 2762 goto out_put; 2763 addr = saddr->sll_addr; 2764 } 2765 } 2766 2767 err = -ENXIO; 2768 if (unlikely(dev == NULL)) 2769 goto out; 2770 err = -ENETDOWN; 2771 if (unlikely(!(dev->flags & IFF_UP))) 2772 goto out_put; 2773 2774 sockcm_init(&sockc, &po->sk); 2775 if (msg->msg_controllen) { 2776 err = sock_cmsg_send(&po->sk, msg, &sockc); 2777 if (unlikely(err)) 2778 goto out_put; 2779 } 2780 2781 if (po->sk.sk_socket->type == SOCK_RAW) 2782 reserve = dev->hard_header_len; 2783 size_max = po->tx_ring.frame_size 2784 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2785 2786 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz) 2787 size_max = dev->mtu + reserve + VLAN_HLEN; 2788 2789 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT); 2790 reinit_completion(&po->skb_completion); 2791 2792 do { 2793 ph = packet_current_frame(po, &po->tx_ring, 2794 TP_STATUS_SEND_REQUEST); 2795 if (unlikely(ph == NULL)) { 2796 /* Note: packet_read_pending() might be slow if we 2797 * have to call it as it's per_cpu variable, but in 2798 * fast-path we don't have to call it, only when ph 2799 * is NULL, we need to check the pending_refcnt. 2800 */ 2801 if (need_wait && packet_read_pending(&po->tx_ring)) { 2802 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo); 2803 if (timeo <= 0) { 2804 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS; 2805 goto out_put; 2806 } 2807 /* check for additional frames */ 2808 continue; 2809 } else 2810 break; 2811 } 2812 2813 skb = NULL; 2814 tp_len = tpacket_parse_header(po, ph, size_max, &data); 2815 if (tp_len < 0) 2816 goto tpacket_error; 2817 2818 status = TP_STATUS_SEND_REQUEST; 2819 hlen = LL_RESERVED_SPACE(dev); 2820 tlen = dev->needed_tailroom; 2821 if (vnet_hdr_sz) { 2822 vnet_hdr = data; 2823 data += vnet_hdr_sz; 2824 tp_len -= vnet_hdr_sz; 2825 if (tp_len < 0 || 2826 __packet_snd_vnet_parse(vnet_hdr, tp_len)) { 2827 tp_len = -EINVAL; 2828 goto tpacket_error; 2829 } 2830 copylen = __virtio16_to_cpu(vio_le(), 2831 vnet_hdr->hdr_len); 2832 } 2833 copylen = max_t(int, copylen, dev->hard_header_len); 2834 skb = sock_alloc_send_skb(&po->sk, 2835 hlen + tlen + sizeof(struct sockaddr_ll) + 2836 (copylen - dev->hard_header_len), 2837 !need_wait, &err); 2838 2839 if (unlikely(skb == NULL)) { 2840 /* we assume the socket was initially writeable ... 
*/ 2841 if (likely(len_sum > 0)) 2842 err = len_sum; 2843 goto out_status; 2844 } 2845 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, 2846 addr, hlen, copylen, &sockc); 2847 if (likely(tp_len >= 0) && 2848 tp_len > dev->mtu + reserve && 2849 !vnet_hdr_sz && 2850 !packet_extra_vlan_len_allowed(dev, skb)) 2851 tp_len = -EMSGSIZE; 2852 2853 if (unlikely(tp_len < 0)) { 2854 tpacket_error: 2855 if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) { 2856 __packet_set_status(po, ph, 2857 TP_STATUS_AVAILABLE); 2858 packet_increment_head(&po->tx_ring); 2859 kfree_skb(skb); 2860 continue; 2861 } else { 2862 status = TP_STATUS_WRONG_FORMAT; 2863 err = tp_len; 2864 goto out_status; 2865 } 2866 } 2867 2868 if (vnet_hdr_sz) { 2869 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { 2870 tp_len = -EINVAL; 2871 goto tpacket_error; 2872 } 2873 virtio_net_hdr_set_proto(skb, vnet_hdr); 2874 } 2875 2876 skb->destructor = tpacket_destruct_skb; 2877 __packet_set_status(po, ph, TP_STATUS_SENDING); 2878 packet_inc_pending(&po->tx_ring); 2879 2880 status = TP_STATUS_SEND_REQUEST; 2881 err = packet_xmit(po, skb); 2882 if (unlikely(err != 0)) { 2883 if (err > 0) 2884 err = net_xmit_errno(err); 2885 if (err && __packet_get_status(po, ph) == 2886 TP_STATUS_AVAILABLE) { 2887 /* skb was destructed already */ 2888 skb = NULL; 2889 goto out_status; 2890 } 2891 /* 2892 * skb was dropped but not destructed yet; 2893 * let's treat it like congestion or err < 0 2894 */ 2895 err = 0; 2896 } 2897 packet_increment_head(&po->tx_ring); 2898 len_sum += tp_len; 2899 } while (1); 2900 2901 err = len_sum; 2902 goto out_put; 2903 2904 out_status: 2905 __packet_set_status(po, ph, status); 2906 kfree_skb(skb); 2907 out_put: 2908 dev_put(dev); 2909 out: 2910 mutex_unlock(&po->pg_vec_lock); 2911 return err; 2912 } 2913 2914 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, 2915 size_t reserve, size_t len, 2916 size_t linear, int noblock, 2917 int *err) 2918 { 2919 struct sk_buff *skb; 2920 2921 /* Under a page? Don't bother with paged skb. */ 2922 if (prepad + len < PAGE_SIZE || !linear) 2923 linear = len; 2924 2925 if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) 2926 linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); 2927 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 2928 err, PAGE_ALLOC_COSTLY_ORDER); 2929 if (!skb) 2930 return NULL; 2931 2932 skb_reserve(skb, reserve); 2933 skb_put(skb, linear); 2934 skb->data_len = len - linear; 2935 skb->len += len - linear; 2936 2937 return skb; 2938 } 2939 2940 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) 2941 { 2942 struct sock *sk = sock->sk; 2943 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2944 struct sk_buff *skb; 2945 struct net_device *dev; 2946 __be16 proto; 2947 unsigned char *addr = NULL; 2948 int err, reserve = 0; 2949 struct sockcm_cookie sockc; 2950 struct virtio_net_hdr vnet_hdr = { 0 }; 2951 int offset = 0; 2952 struct packet_sock *po = pkt_sk(sk); 2953 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); 2954 int hlen, tlen, linear; 2955 int extra_len = 0; 2956 2957 /* 2958 * Get and verify the address. 
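	 * When msg_name is supplied it is a struct sockaddr_ll naming the
	 * egress device and protocol; for SOCK_DGRAM it must also carry the
	 * destination hardware address, since the kernel will build the link
	 * layer header itself.  A hedged userspace sketch ("eth0" and dst_mac
	 * are placeholders):
	 *
	 *	struct sockaddr_ll sll = { .sll_family = AF_PACKET };
	 *	sll.sll_protocol = htons(ETH_P_IP);
	 *	sll.sll_ifindex  = if_nametoindex("eth0");
	 *	sll.sll_halen    = ETH_ALEN;
	 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
	 *	sendto(fd, payload, plen, 0, (struct sockaddr *)&sll, sizeof(sll));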
2959 */ 2960 2961 if (likely(saddr == NULL)) { 2962 dev = packet_cached_dev_get(po); 2963 proto = READ_ONCE(po->num); 2964 } else { 2965 err = -EINVAL; 2966 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2967 goto out; 2968 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2969 goto out; 2970 proto = saddr->sll_protocol; 2971 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2972 if (sock->type == SOCK_DGRAM) { 2973 if (dev && msg->msg_namelen < dev->addr_len + 2974 offsetof(struct sockaddr_ll, sll_addr)) 2975 goto out_unlock; 2976 addr = saddr->sll_addr; 2977 } 2978 } 2979 2980 err = -ENXIO; 2981 if (unlikely(dev == NULL)) 2982 goto out_unlock; 2983 err = -ENETDOWN; 2984 if (unlikely(!(dev->flags & IFF_UP))) 2985 goto out_unlock; 2986 2987 sockcm_init(&sockc, sk); 2988 if (msg->msg_controllen) { 2989 err = sock_cmsg_send(sk, msg, &sockc); 2990 if (unlikely(err)) 2991 goto out_unlock; 2992 } 2993 2994 if (sock->type == SOCK_RAW) 2995 reserve = dev->hard_header_len; 2996 if (vnet_hdr_sz) { 2997 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz); 2998 if (err) 2999 goto out_unlock; 3000 } 3001 3002 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 3003 if (!netif_supports_nofcs(dev)) { 3004 err = -EPROTONOSUPPORT; 3005 goto out_unlock; 3006 } 3007 extra_len = 4; /* We're doing our own CRC */ 3008 } 3009 3010 err = -EMSGSIZE; 3011 if (!vnet_hdr.gso_type && 3012 (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) 3013 goto out_unlock; 3014 3015 err = -ENOBUFS; 3016 hlen = LL_RESERVED_SPACE(dev); 3017 tlen = dev->needed_tailroom; 3018 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 3019 linear = max(linear, min_t(int, len, dev->hard_header_len)); 3020 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, 3021 msg->msg_flags & MSG_DONTWAIT, &err); 3022 if (skb == NULL) 3023 goto out_unlock; 3024 3025 skb_reset_network_header(skb); 3026 3027 err = -EINVAL; 3028 if (sock->type == SOCK_DGRAM) { 3029 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 3030 if (unlikely(offset < 0)) 3031 goto out_free; 3032 } else if (reserve) { 3033 skb_reserve(skb, -reserve); 3034 if (len < reserve + sizeof(struct ipv6hdr) && 3035 dev->min_header_len != dev->hard_header_len) 3036 skb_reset_network_header(skb); 3037 } 3038 3039 /* Returns -EFAULT on error */ 3040 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); 3041 if (err) 3042 goto out_free; 3043 3044 if ((sock->type == SOCK_RAW && 3045 !dev_validate_header(dev, skb->data, len)) || !skb->len) { 3046 err = -EINVAL; 3047 goto out_free; 3048 } 3049 3050 skb_setup_tx_timestamp(skb, &sockc); 3051 3052 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && 3053 !packet_extra_vlan_len_allowed(dev, skb)) { 3054 err = -EMSGSIZE; 3055 goto out_free; 3056 } 3057 3058 skb->protocol = proto; 3059 skb->dev = dev; 3060 skb->priority = sockc.priority; 3061 skb->mark = sockc.mark; 3062 skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid); 3063 3064 if (unlikely(extra_len == 4)) 3065 skb->no_fcs = 1; 3066 3067 packet_parse_headers(skb, sock); 3068 3069 if (vnet_hdr_sz) { 3070 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); 3071 if (err) 3072 goto out_free; 3073 len += vnet_hdr_sz; 3074 virtio_net_hdr_set_proto(skb, &vnet_hdr); 3075 } 3076 3077 err = packet_xmit(po, skb); 3078 3079 if (unlikely(err != 0)) { 3080 if (err > 0) 3081 err = net_xmit_errno(err); 3082 if (err) 3083 goto out_unlock; 3084 } 3085 3086 dev_put(dev); 3087 3088 return len; 
3089 3090 out_free: 3091 kfree_skb(skb); 3092 out_unlock: 3093 dev_put(dev); 3094 out: 3095 return err; 3096 } 3097 3098 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 3099 { 3100 struct sock *sk = sock->sk; 3101 struct packet_sock *po = pkt_sk(sk); 3102 3103 /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy. 3104 * tpacket_snd() will redo the check safely. 3105 */ 3106 if (data_race(po->tx_ring.pg_vec)) 3107 return tpacket_snd(po, msg); 3108 3109 return packet_snd(sock, msg, len); 3110 } 3111 3112 /* 3113 * Close a PACKET socket. This is fairly simple. We immediately go 3114 * to 'closed' state and remove our protocol entry in the device list. 3115 */ 3116 3117 static int packet_release(struct socket *sock) 3118 { 3119 struct sock *sk = sock->sk; 3120 struct packet_sock *po; 3121 struct packet_fanout *f; 3122 struct net *net; 3123 union tpacket_req_u req_u; 3124 3125 if (!sk) 3126 return 0; 3127 3128 net = sock_net(sk); 3129 po = pkt_sk(sk); 3130 3131 mutex_lock(&net->packet.sklist_lock); 3132 sk_del_node_init_rcu(sk); 3133 mutex_unlock(&net->packet.sklist_lock); 3134 3135 sock_prot_inuse_add(net, sk->sk_prot, -1); 3136 3137 spin_lock(&po->bind_lock); 3138 unregister_prot_hook(sk, false); 3139 WRITE_ONCE(po->num, 0); 3140 packet_cached_dev_reset(po); 3141 3142 if (po->prot_hook.dev) { 3143 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker); 3144 po->prot_hook.dev = NULL; 3145 } 3146 spin_unlock(&po->bind_lock); 3147 3148 packet_flush_mclist(sk); 3149 3150 lock_sock(sk); 3151 if (po->rx_ring.pg_vec) { 3152 memset(&req_u, 0, sizeof(req_u)); 3153 packet_set_ring(sk, &req_u, 1, 0); 3154 } 3155 3156 if (po->tx_ring.pg_vec) { 3157 memset(&req_u, 0, sizeof(req_u)); 3158 packet_set_ring(sk, &req_u, 1, 1); 3159 } 3160 release_sock(sk); 3161 3162 f = fanout_release(sk); 3163 3164 synchronize_net(); 3165 3166 kfree(po->rollover); 3167 if (f) { 3168 fanout_release_data(f); 3169 kvfree(f); 3170 } 3171 /* 3172 * Now the socket is dead. No more input will appear. 3173 */ 3174 sock_orphan(sk); 3175 sock->sk = NULL; 3176 3177 /* Purge queues */ 3178 3179 skb_queue_purge(&sk->sk_receive_queue); 3180 packet_free_pending(po); 3181 3182 sock_put(sk); 3183 return 0; 3184 } 3185 3186 /* 3187 * Attach a packet hook. 
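 *	packet_do_bind() below is the workhorse behind bind(2) on these
 *	sockets: under bind_lock it re-targets po->prot_hook at the requested
 *	device and protocol, unregistering and re-registering the hook as
 *	needed.  The usual userspace entry point, sketched with "eth0" as a
 *	placeholder:
 *
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *		struct sockaddr_ll sll = { .sll_family = AF_PACKET };
 *		sll.sll_protocol = htons(ETH_P_ALL);
 *		sll.sll_ifindex  = if_nametoindex("eth0");
 *		bind(fd, (struct sockaddr *)&sll, sizeof(sll));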
3188 */ 3189 3190 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, 3191 __be16 proto) 3192 { 3193 struct packet_sock *po = pkt_sk(sk); 3194 struct net_device *dev = NULL; 3195 bool unlisted = false; 3196 bool need_rehook; 3197 int ret = 0; 3198 3199 lock_sock(sk); 3200 spin_lock(&po->bind_lock); 3201 if (!proto) 3202 proto = po->num; 3203 3204 rcu_read_lock(); 3205 3206 if (po->fanout) { 3207 ret = -EINVAL; 3208 goto out_unlock; 3209 } 3210 3211 if (name) { 3212 dev = dev_get_by_name_rcu(sock_net(sk), name); 3213 if (!dev) { 3214 ret = -ENODEV; 3215 goto out_unlock; 3216 } 3217 } else if (ifindex) { 3218 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3219 if (!dev) { 3220 ret = -ENODEV; 3221 goto out_unlock; 3222 } 3223 } 3224 3225 need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev; 3226 3227 if (need_rehook) { 3228 dev_hold(dev); 3229 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) { 3230 rcu_read_unlock(); 3231 /* prevents packet_notifier() from calling 3232 * register_prot_hook() 3233 */ 3234 WRITE_ONCE(po->num, 0); 3235 __unregister_prot_hook(sk, true); 3236 rcu_read_lock(); 3237 if (dev) 3238 unlisted = !dev_get_by_index_rcu(sock_net(sk), 3239 dev->ifindex); 3240 } 3241 3242 BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING)); 3243 WRITE_ONCE(po->num, proto); 3244 po->prot_hook.type = proto; 3245 3246 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker); 3247 3248 if (unlikely(unlisted)) { 3249 po->prot_hook.dev = NULL; 3250 WRITE_ONCE(po->ifindex, -1); 3251 packet_cached_dev_reset(po); 3252 } else { 3253 netdev_hold(dev, &po->prot_hook.dev_tracker, 3254 GFP_ATOMIC); 3255 po->prot_hook.dev = dev; 3256 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); 3257 packet_cached_dev_assign(po, dev); 3258 } 3259 dev_put(dev); 3260 } 3261 3262 if (proto == 0 || !need_rehook) 3263 goto out_unlock; 3264 3265 if (!unlisted && (!dev || (dev->flags & IFF_UP))) { 3266 register_prot_hook(sk); 3267 } else { 3268 sk->sk_err = ENETDOWN; 3269 if (!sock_flag(sk, SOCK_DEAD)) 3270 sk_error_report(sk); 3271 } 3272 3273 out_unlock: 3274 rcu_read_unlock(); 3275 spin_unlock(&po->bind_lock); 3276 release_sock(sk); 3277 return ret; 3278 } 3279 3280 /* 3281 * Bind a packet socket to a device 3282 */ 3283 3284 static int packet_bind_spkt(struct socket *sock, struct sockaddr_unsized *uaddr, 3285 int addr_len) 3286 { 3287 struct sock *sk = sock->sk; 3288 struct sockaddr *sa = (struct sockaddr *)uaddr; 3289 char name[sizeof(sa->sa_data) + 1]; 3290 3291 /* 3292 * Check legality 3293 */ 3294 3295 if (addr_len != sizeof(struct sockaddr)) 3296 return -EINVAL; 3297 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be 3298 * zero-terminated. 3299 */ 3300 memcpy(name, sa->sa_data, sizeof(sa->sa_data)); 3301 name[sizeof(sa->sa_data)] = 0; 3302 3303 return packet_do_bind(sk, name, 0, 0); 3304 } 3305 3306 static int packet_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) 3307 { 3308 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; 3309 struct sock *sk = sock->sk; 3310 3311 /* 3312 * Check legality 3313 */ 3314 3315 if (addr_len < sizeof(struct sockaddr_ll)) 3316 return -EINVAL; 3317 if (sll->sll_family != AF_PACKET) 3318 return -EINVAL; 3319 3320 return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol); 3321 } 3322 3323 static struct proto packet_proto = { 3324 .name = "PACKET", 3325 .owner = THIS_MODULE, 3326 .obj_size = sizeof(struct packet_sock), 3327 }; 3328 3329 /* 3330 * Create a packet of type SOCK_PACKET. 
3331 */ 3332 3333 static int packet_create(struct net *net, struct socket *sock, int protocol, 3334 int kern) 3335 { 3336 struct sock *sk; 3337 struct packet_sock *po; 3338 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 3339 int err; 3340 3341 if (!ns_capable(net->user_ns, CAP_NET_RAW)) 3342 return -EPERM; 3343 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 3344 sock->type != SOCK_PACKET) 3345 return -ESOCKTNOSUPPORT; 3346 3347 sock->state = SS_UNCONNECTED; 3348 3349 err = -ENOBUFS; 3350 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); 3351 if (sk == NULL) 3352 goto out; 3353 3354 sock->ops = &packet_ops; 3355 if (sock->type == SOCK_PACKET) 3356 sock->ops = &packet_ops_spkt; 3357 3358 po = pkt_sk(sk); 3359 err = packet_alloc_pending(po); 3360 if (err) 3361 goto out_sk_free; 3362 3363 sock_init_data(sock, sk); 3364 3365 init_completion(&po->skb_completion); 3366 sk->sk_family = PF_PACKET; 3367 po->num = proto; 3368 3369 packet_cached_dev_reset(po); 3370 3371 sk->sk_destruct = packet_sock_destruct; 3372 3373 /* 3374 * Attach a protocol block 3375 */ 3376 3377 spin_lock_init(&po->bind_lock); 3378 mutex_init(&po->pg_vec_lock); 3379 po->rollover = NULL; 3380 po->prot_hook.func = packet_rcv; 3381 3382 if (sock->type == SOCK_PACKET) 3383 po->prot_hook.func = packet_rcv_spkt; 3384 3385 po->prot_hook.af_packet_priv = sk; 3386 po->prot_hook.af_packet_net = sock_net(sk); 3387 3388 if (proto) { 3389 po->prot_hook.type = proto; 3390 __register_prot_hook(sk); 3391 } 3392 3393 mutex_lock(&net->packet.sklist_lock); 3394 sk_add_node_tail_rcu(sk, &net->packet.sklist); 3395 mutex_unlock(&net->packet.sklist_lock); 3396 3397 sock_prot_inuse_add(net, &packet_proto, 1); 3398 3399 return 0; 3400 out_sk_free: 3401 sk_free(sk); 3402 out: 3403 return err; 3404 } 3405 3406 /* 3407 * Pull a packet from our receive queue and hand it to the user. 3408 * If necessary we block. 3409 */ 3410 3411 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 3412 int flags) 3413 { 3414 struct sock *sk = sock->sk; 3415 struct sk_buff *skb; 3416 int copied, err; 3417 int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz); 3418 unsigned int origlen = 0; 3419 3420 err = -EINVAL; 3421 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) 3422 goto out; 3423 3424 #if 0 3425 /* What error should we return now? EUNATTACH? */ 3426 if (pkt_sk(sk)->ifindex < 0) 3427 return -ENODEV; 3428 #endif 3429 3430 if (flags & MSG_ERRQUEUE) { 3431 err = sock_recv_errqueue(sk, msg, len, 3432 SOL_PACKET, PACKET_TX_TIMESTAMP); 3433 goto out; 3434 } 3435 3436 /* 3437 * Call the generic datagram receiver. This handles all sorts 3438 * of horrible races and re-entrancy so we can forget about it 3439 * in the protocol layers. 3440 * 3441 * Now it will return ENETDOWN, if device have just gone down, 3442 * but then it will block. 3443 */ 3444 3445 skb = skb_recv_datagram(sk, flags, &err); 3446 3447 /* 3448 * An error occurred so return it. Because skb_recv_datagram() 3449 * handles the blocking we don't see and worry about blocking 3450 * retries. 3451 */ 3452 3453 if (skb == NULL) 3454 goto out; 3455 3456 packet_rcv_try_clear_pressure(pkt_sk(sk)); 3457 3458 if (vnet_hdr_len) { 3459 err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len); 3460 if (err) 3461 goto out_free; 3462 } 3463 3464 /* You lose any data beyond the buffer you gave. If it worries 3465 * a user program they can ask the device for its MTU 3466 * anyway. 
3467 */ 3468 copied = skb->len; 3469 if (copied > len) { 3470 copied = len; 3471 msg->msg_flags |= MSG_TRUNC; 3472 } 3473 3474 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3475 if (err) 3476 goto out_free; 3477 3478 if (sock->type != SOCK_PACKET) { 3479 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3480 3481 /* Original length was stored in sockaddr_ll fields */ 3482 origlen = PACKET_SKB_CB(skb)->sa.origlen; 3483 sll->sll_family = AF_PACKET; 3484 sll->sll_protocol = (sock->type == SOCK_DGRAM) ? 3485 vlan_get_protocol_dgram(skb) : skb->protocol; 3486 } 3487 3488 sock_recv_cmsgs(msg, sk, skb); 3489 3490 if (msg->msg_name) { 3491 const size_t max_len = min(sizeof(skb->cb), 3492 sizeof(struct sockaddr_storage)); 3493 int copy_len; 3494 3495 /* If the address length field is there to be filled 3496 * in, we fill it in now. 3497 */ 3498 if (sock->type == SOCK_PACKET) { 3499 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 3500 msg->msg_namelen = sizeof(struct sockaddr_pkt); 3501 copy_len = msg->msg_namelen; 3502 } else { 3503 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3504 3505 msg->msg_namelen = sll->sll_halen + 3506 offsetof(struct sockaddr_ll, sll_addr); 3507 copy_len = msg->msg_namelen; 3508 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) { 3509 memset(msg->msg_name + 3510 offsetof(struct sockaddr_ll, sll_addr), 3511 0, sizeof(sll->sll_addr)); 3512 msg->msg_namelen = sizeof(struct sockaddr_ll); 3513 } 3514 } 3515 if (WARN_ON_ONCE(copy_len > max_len)) { 3516 copy_len = max_len; 3517 msg->msg_namelen = copy_len; 3518 } 3519 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); 3520 } 3521 3522 if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) { 3523 struct tpacket_auxdata aux; 3524 3525 aux.tp_status = TP_STATUS_USER; 3526 if (skb->ip_summed == CHECKSUM_PARTIAL) 3527 aux.tp_status |= TP_STATUS_CSUMNOTREADY; 3528 else if (skb->pkt_type != PACKET_OUTGOING && 3529 skb_csum_unnecessary(skb)) 3530 aux.tp_status |= TP_STATUS_CSUM_VALID; 3531 if (skb_is_gso(skb) && skb_is_gso_tcp(skb)) 3532 aux.tp_status |= TP_STATUS_GSO_TCP; 3533 3534 aux.tp_len = origlen; 3535 aux.tp_snaplen = skb->len; 3536 aux.tp_mac = 0; 3537 aux.tp_net = skb_network_offset(skb); 3538 if (skb_vlan_tag_present(skb)) { 3539 aux.tp_vlan_tci = skb_vlan_tag_get(skb); 3540 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3541 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3542 } else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { 3543 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3544 struct net_device *dev; 3545 3546 rcu_read_lock(); 3547 dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex); 3548 if (dev) { 3549 aux.tp_vlan_tci = vlan_get_tci(skb, dev); 3550 aux.tp_vlan_tpid = ntohs(skb->protocol); 3551 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3552 } else { 3553 aux.tp_vlan_tci = 0; 3554 aux.tp_vlan_tpid = 0; 3555 } 3556 rcu_read_unlock(); 3557 } else { 3558 aux.tp_vlan_tci = 0; 3559 aux.tp_vlan_tpid = 0; 3560 } 3561 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 3562 } 3563 3564 /* 3565 * Free or return the buffer as appropriate. Again this 3566 * hides all the races and re-entrancy issues from us. 3567 */ 3568 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); 3569 3570 out_free: 3571 skb_free_datagram(sk, skb); 3572 out: 3573 return err; 3574 } 3575 3576 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, 3577 int peer) 3578 { 3579 struct net_device *dev; 3580 struct sock *sk = sock->sk; 3581 3582 if (peer) 3583 return -EOPNOTSUPP; 3584 3585 uaddr->sa_family = AF_PACKET; 3586 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); 3587 rcu_read_lock(); 3588 dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); 3589 if (dev) 3590 strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); 3591 rcu_read_unlock(); 3592 3593 return sizeof(*uaddr); 3594 } 3595 3596 static int packet_getname(struct socket *sock, struct sockaddr *uaddr, 3597 int peer) 3598 { 3599 struct net_device *dev; 3600 struct sock *sk = sock->sk; 3601 struct packet_sock *po = pkt_sk(sk); 3602 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); 3603 int ifindex; 3604 3605 if (peer) 3606 return -EOPNOTSUPP; 3607 3608 ifindex = READ_ONCE(po->ifindex); 3609 sll->sll_family = AF_PACKET; 3610 sll->sll_ifindex = ifindex; 3611 sll->sll_protocol = READ_ONCE(po->num); 3612 sll->sll_pkttype = 0; 3613 rcu_read_lock(); 3614 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); 3615 if (dev) { 3616 sll->sll_hatype = dev->type; 3617 sll->sll_halen = dev->addr_len; 3618 3619 /* Let __fortify_memcpy_chk() know the actual buffer size. */ 3620 memcpy(((struct sockaddr_storage *)sll)->__data + 3621 offsetof(struct sockaddr_ll, sll_addr) - 3622 offsetofend(struct sockaddr_ll, sll_family), 3623 dev->dev_addr, dev->addr_len); 3624 } else { 3625 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ 3626 sll->sll_halen = 0; 3627 } 3628 rcu_read_unlock(); 3629 3630 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; 3631 } 3632 3633 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, 3634 int what) 3635 { 3636 switch (i->type) { 3637 case PACKET_MR_MULTICAST: 3638 if (i->alen != dev->addr_len) 3639 return -EINVAL; 3640 if (what > 0) 3641 return dev_mc_add(dev, i->addr); 3642 else 3643 return dev_mc_del(dev, i->addr); 3644 break; 3645 case PACKET_MR_PROMISC: 3646 return dev_set_promiscuity(dev, what); 3647 case PACKET_MR_ALLMULTI: 3648 return dev_set_allmulti(dev, what); 3649 case PACKET_MR_UNICAST: 3650 if (i->alen != dev->addr_len) 3651 return -EINVAL; 3652 if (what > 0) 3653 return dev_uc_add(dev, i->addr); 3654 else 3655 return dev_uc_del(dev, i->addr); 3656 break; 3657 default: 3658 break; 3659 } 3660 return 0; 3661 } 3662 3663 static void packet_dev_mclist_delete(struct net_device *dev, 3664 struct packet_mclist **mlp, 3665 struct list_head *list) 3666 { 3667 struct packet_mclist *ml; 3668 3669 while ((ml = *mlp) != NULL) { 3670 if (ml->ifindex == dev->ifindex) { 3671 list_add(&ml->remove_list, list); 3672 *mlp = ml->next; 3673 } else 3674 mlp = &ml->next; 3675 } 3676 } 3677 3678 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) 3679 { 3680 struct packet_sock *po = pkt_sk(sk); 3681 struct packet_mclist *ml, *i; 3682 struct net_device *dev; 3683 int err; 3684 3685 rtnl_lock(); 3686 3687 err = -ENODEV; 3688 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); 3689 if (!dev) 3690 goto done; 3691 3692 err = -EINVAL; 3693 if (mreq->mr_alen > dev->addr_len) 3694 goto done; 3695 3696 err = -ENOBUFS; 3697 i = kmalloc_obj(*i); 3698 if (i == NULL) 3699 goto done; 3700 3701 err = 0; 3702 for (ml = po->mclist; ml; ml = ml->next) { 3703 if (ml->ifindex == mreq->mr_ifindex && 3704 ml->type == 
mreq->mr_type && 3705 ml->alen == mreq->mr_alen && 3706 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3707 ml->count++; 3708 /* Free the new element ... */ 3709 kfree(i); 3710 goto done; 3711 } 3712 } 3713 3714 i->type = mreq->mr_type; 3715 i->ifindex = mreq->mr_ifindex; 3716 i->alen = mreq->mr_alen; 3717 memcpy(i->addr, mreq->mr_address, i->alen); 3718 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3719 i->count = 1; 3720 INIT_LIST_HEAD(&i->remove_list); 3721 i->next = po->mclist; 3722 po->mclist = i; 3723 err = packet_dev_mc(dev, i, 1); 3724 if (err) { 3725 po->mclist = i->next; 3726 kfree(i); 3727 } 3728 3729 done: 3730 rtnl_unlock(); 3731 return err; 3732 } 3733 3734 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) 3735 { 3736 struct packet_mclist *ml, **mlp; 3737 3738 rtnl_lock(); 3739 3740 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { 3741 if (ml->ifindex == mreq->mr_ifindex && 3742 ml->type == mreq->mr_type && 3743 ml->alen == mreq->mr_alen && 3744 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { 3745 if (--ml->count == 0) { 3746 struct net_device *dev; 3747 *mlp = ml->next; 3748 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3749 if (dev) 3750 packet_dev_mc(dev, ml, -1); 3751 kfree(ml); 3752 } 3753 break; 3754 } 3755 } 3756 rtnl_unlock(); 3757 return 0; 3758 } 3759 3760 static void packet_flush_mclist(struct sock *sk) 3761 { 3762 struct packet_sock *po = pkt_sk(sk); 3763 struct packet_mclist *ml; 3764 3765 if (!po->mclist) 3766 return; 3767 3768 rtnl_lock(); 3769 while ((ml = po->mclist) != NULL) { 3770 struct net_device *dev; 3771 3772 po->mclist = ml->next; 3773 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); 3774 if (dev != NULL) 3775 packet_dev_mc(dev, ml, -1); 3776 kfree(ml); 3777 } 3778 rtnl_unlock(); 3779 } 3780 3781 static int 3782 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, 3783 unsigned int optlen) 3784 { 3785 struct sock *sk = sock->sk; 3786 struct packet_sock *po = pkt_sk(sk); 3787 int ret; 3788 3789 if (level != SOL_PACKET) 3790 return -ENOPROTOOPT; 3791 3792 switch (optname) { 3793 case PACKET_ADD_MEMBERSHIP: 3794 case PACKET_DROP_MEMBERSHIP: 3795 { 3796 struct packet_mreq_max mreq; 3797 int len = optlen; 3798 memset(&mreq, 0, sizeof(mreq)); 3799 if (len < sizeof(struct packet_mreq)) 3800 return -EINVAL; 3801 if (len > sizeof(mreq)) 3802 len = sizeof(mreq); 3803 if (copy_from_sockptr(&mreq, optval, len)) 3804 return -EFAULT; 3805 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) 3806 return -EINVAL; 3807 if (optname == PACKET_ADD_MEMBERSHIP) 3808 ret = packet_mc_add(sk, &mreq); 3809 else 3810 ret = packet_mc_drop(sk, &mreq); 3811 return ret; 3812 } 3813 3814 case PACKET_RX_RING: 3815 case PACKET_TX_RING: 3816 { 3817 union tpacket_req_u req_u; 3818 3819 ret = -EINVAL; 3820 lock_sock(sk); 3821 switch (po->tp_version) { 3822 case TPACKET_V1: 3823 case TPACKET_V2: 3824 if (optlen < sizeof(req_u.req)) 3825 break; 3826 ret = copy_from_sockptr(&req_u.req, optval, 3827 sizeof(req_u.req)) ? 3828 -EINVAL : 0; 3829 break; 3830 case TPACKET_V3: 3831 default: 3832 if (optlen < sizeof(req_u.req3)) 3833 break; 3834 ret = copy_from_sockptr(&req_u.req3, optval, 3835 sizeof(req_u.req3)) ? 
3836 -EINVAL : 0; 3837 break; 3838 } 3839 if (!ret) 3840 ret = packet_set_ring(sk, &req_u, 0, 3841 optname == PACKET_TX_RING); 3842 release_sock(sk); 3843 return ret; 3844 } 3845 case PACKET_COPY_THRESH: 3846 { 3847 int val; 3848 3849 if (optlen != sizeof(val)) 3850 return -EINVAL; 3851 if (copy_from_sockptr(&val, optval, sizeof(val))) 3852 return -EFAULT; 3853 3854 WRITE_ONCE(pkt_sk(sk)->copy_thresh, val); 3855 return 0; 3856 } 3857 case PACKET_VERSION: 3858 { 3859 int val; 3860 3861 if (optlen != sizeof(val)) 3862 return -EINVAL; 3863 if (copy_from_sockptr(&val, optval, sizeof(val))) 3864 return -EFAULT; 3865 switch (val) { 3866 case TPACKET_V1: 3867 case TPACKET_V2: 3868 case TPACKET_V3: 3869 break; 3870 default: 3871 return -EINVAL; 3872 } 3873 lock_sock(sk); 3874 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3875 ret = -EBUSY; 3876 } else { 3877 po->tp_version = val; 3878 ret = 0; 3879 } 3880 release_sock(sk); 3881 return ret; 3882 } 3883 case PACKET_RESERVE: 3884 { 3885 unsigned int val; 3886 3887 if (optlen != sizeof(val)) 3888 return -EINVAL; 3889 if (copy_from_sockptr(&val, optval, sizeof(val))) 3890 return -EFAULT; 3891 if (val > INT_MAX) 3892 return -EINVAL; 3893 lock_sock(sk); 3894 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3895 ret = -EBUSY; 3896 } else { 3897 po->tp_reserve = val; 3898 ret = 0; 3899 } 3900 release_sock(sk); 3901 return ret; 3902 } 3903 case PACKET_LOSS: 3904 { 3905 unsigned int val; 3906 3907 if (optlen != sizeof(val)) 3908 return -EINVAL; 3909 if (copy_from_sockptr(&val, optval, sizeof(val))) 3910 return -EFAULT; 3911 3912 lock_sock(sk); 3913 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3914 ret = -EBUSY; 3915 } else { 3916 packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val); 3917 ret = 0; 3918 } 3919 release_sock(sk); 3920 return ret; 3921 } 3922 case PACKET_AUXDATA: 3923 { 3924 int val; 3925 3926 if (optlen < sizeof(val)) 3927 return -EINVAL; 3928 if (copy_from_sockptr(&val, optval, sizeof(val))) 3929 return -EFAULT; 3930 3931 packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val); 3932 return 0; 3933 } 3934 case PACKET_ORIGDEV: 3935 { 3936 int val; 3937 3938 if (optlen < sizeof(val)) 3939 return -EINVAL; 3940 if (copy_from_sockptr(&val, optval, sizeof(val))) 3941 return -EFAULT; 3942 3943 packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val); 3944 return 0; 3945 } 3946 case PACKET_VNET_HDR: 3947 case PACKET_VNET_HDR_SZ: 3948 { 3949 int val, hdr_len; 3950 3951 if (sock->type != SOCK_RAW) 3952 return -EINVAL; 3953 if (optlen < sizeof(val)) 3954 return -EINVAL; 3955 if (copy_from_sockptr(&val, optval, sizeof(val))) 3956 return -EFAULT; 3957 3958 if (optname == PACKET_VNET_HDR_SZ) { 3959 if (val && val != sizeof(struct virtio_net_hdr) && 3960 val != sizeof(struct virtio_net_hdr_mrg_rxbuf)) 3961 return -EINVAL; 3962 hdr_len = val; 3963 } else { 3964 hdr_len = val ? 
sizeof(struct virtio_net_hdr) : 0; 3965 } 3966 lock_sock(sk); 3967 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3968 ret = -EBUSY; 3969 } else { 3970 WRITE_ONCE(po->vnet_hdr_sz, hdr_len); 3971 ret = 0; 3972 } 3973 release_sock(sk); 3974 return ret; 3975 } 3976 case PACKET_TIMESTAMP: 3977 { 3978 int val; 3979 3980 if (optlen != sizeof(val)) 3981 return -EINVAL; 3982 if (copy_from_sockptr(&val, optval, sizeof(val))) 3983 return -EFAULT; 3984 3985 WRITE_ONCE(po->tp_tstamp, val); 3986 return 0; 3987 } 3988 case PACKET_FANOUT: 3989 { 3990 struct fanout_args args = { 0 }; 3991 3992 if (optlen != sizeof(int) && optlen != sizeof(args)) 3993 return -EINVAL; 3994 if (copy_from_sockptr(&args, optval, optlen)) 3995 return -EFAULT; 3996 3997 return fanout_add(sk, &args); 3998 } 3999 case PACKET_FANOUT_DATA: 4000 { 4001 /* Paired with the WRITE_ONCE() in fanout_add() */ 4002 if (!READ_ONCE(po->fanout)) 4003 return -EINVAL; 4004 4005 return fanout_set_data(po, optval, optlen); 4006 } 4007 case PACKET_IGNORE_OUTGOING: 4008 { 4009 int val; 4010 4011 if (optlen != sizeof(val)) 4012 return -EINVAL; 4013 if (copy_from_sockptr(&val, optval, sizeof(val))) 4014 return -EFAULT; 4015 if (val < 0 || val > 1) 4016 return -EINVAL; 4017 4018 WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val); 4019 return 0; 4020 } 4021 case PACKET_TX_HAS_OFF: 4022 { 4023 unsigned int val; 4024 4025 if (optlen != sizeof(val)) 4026 return -EINVAL; 4027 if (copy_from_sockptr(&val, optval, sizeof(val))) 4028 return -EFAULT; 4029 4030 lock_sock(sk); 4031 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec) 4032 packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val); 4033 4034 release_sock(sk); 4035 return 0; 4036 } 4037 case PACKET_QDISC_BYPASS: 4038 { 4039 int val; 4040 4041 if (optlen != sizeof(val)) 4042 return -EINVAL; 4043 if (copy_from_sockptr(&val, optval, sizeof(val))) 4044 return -EFAULT; 4045 4046 packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val); 4047 return 0; 4048 } 4049 default: 4050 return -ENOPROTOOPT; 4051 } 4052 } 4053 4054 static int packet_getsockopt(struct socket *sock, int level, int optname, 4055 sockopt_t *opt) 4056 { 4057 int len; 4058 int val, lv = sizeof(val); 4059 struct sock *sk = sock->sk; 4060 struct packet_sock *po = pkt_sk(sk); 4061 void *data = &val; 4062 union tpacket_stats_u st; 4063 struct tpacket_rollover_stats rstats; 4064 int drops; 4065 4066 if (level != SOL_PACKET) 4067 return -ENOPROTOOPT; 4068 4069 len = opt->optlen; 4070 4071 if (len < 0) 4072 return -EINVAL; 4073 4074 switch (optname) { 4075 case PACKET_STATISTICS: 4076 spin_lock_bh(&sk->sk_receive_queue.lock); 4077 memcpy(&st, &po->stats, sizeof(st)); 4078 memset(&po->stats, 0, sizeof(po->stats)); 4079 spin_unlock_bh(&sk->sk_receive_queue.lock); 4080 drops = atomic_xchg(&po->tp_drops, 0); 4081 4082 if (po->tp_version == TPACKET_V3) { 4083 lv = sizeof(struct tpacket_stats_v3); 4084 st.stats3.tp_drops = drops; 4085 st.stats3.tp_packets += drops; 4086 data = &st.stats3; 4087 } else { 4088 lv = sizeof(struct tpacket_stats); 4089 st.stats1.tp_drops = drops; 4090 st.stats1.tp_packets += drops; 4091 data = &st.stats1; 4092 } 4093 4094 break; 4095 case PACKET_AUXDATA: 4096 val = packet_sock_flag(po, PACKET_SOCK_AUXDATA); 4097 break; 4098 case PACKET_ORIGDEV: 4099 val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV); 4100 break; 4101 case PACKET_VNET_HDR: 4102 val = !!READ_ONCE(po->vnet_hdr_sz); 4103 break; 4104 case PACKET_VNET_HDR_SZ: 4105 val = READ_ONCE(po->vnet_hdr_sz); 4106 break; 4107 case PACKET_COPY_THRESH: 4108 val = 
READ_ONCE(pkt_sk(sk)->copy_thresh); 4109 break; 4110 case PACKET_VERSION: 4111 val = po->tp_version; 4112 break; 4113 case PACKET_HDRLEN: 4114 if (len > sizeof(int)) 4115 len = sizeof(int); 4116 if (len < sizeof(int)) 4117 return -EINVAL; 4118 if (copy_from_iter(&val, len, &opt->iter_in) != len) 4119 return -EFAULT; 4120 switch (val) { 4121 case TPACKET_V1: 4122 val = sizeof(struct tpacket_hdr); 4123 break; 4124 case TPACKET_V2: 4125 val = sizeof(struct tpacket2_hdr); 4126 break; 4127 case TPACKET_V3: 4128 val = sizeof(struct tpacket3_hdr); 4129 break; 4130 default: 4131 return -EINVAL; 4132 } 4133 break; 4134 case PACKET_RESERVE: 4135 val = po->tp_reserve; 4136 break; 4137 case PACKET_LOSS: 4138 val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS); 4139 break; 4140 case PACKET_TIMESTAMP: 4141 val = READ_ONCE(po->tp_tstamp); 4142 break; 4143 case PACKET_FANOUT: 4144 val = (po->fanout ? 4145 ((u32)po->fanout->id | 4146 ((u32)po->fanout->type << 16) | 4147 ((u32)po->fanout->flags << 24)) : 4148 0); 4149 break; 4150 case PACKET_IGNORE_OUTGOING: 4151 val = READ_ONCE(po->prot_hook.ignore_outgoing); 4152 break; 4153 case PACKET_ROLLOVER_STATS: 4154 if (!po->rollover) 4155 return -EINVAL; 4156 rstats.tp_all = atomic_long_read(&po->rollover->num); 4157 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); 4158 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); 4159 data = &rstats; 4160 lv = sizeof(rstats); 4161 break; 4162 case PACKET_TX_HAS_OFF: 4163 val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF); 4164 break; 4165 case PACKET_QDISC_BYPASS: 4166 val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS); 4167 break; 4168 default: 4169 return -ENOPROTOOPT; 4170 } 4171 4172 if (len > lv) 4173 len = lv; 4174 opt->optlen = len; 4175 if (copy_to_iter(data, len, &opt->iter_out) != len) 4176 return -EFAULT; 4177 return 0; 4178 } 4179 4180 static int packet_notifier(struct notifier_block *this, 4181 unsigned long msg, void *ptr) 4182 { 4183 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4184 struct net *net = dev_net(dev); 4185 struct packet_mclist *ml, *tmp; 4186 LIST_HEAD(mclist); 4187 struct sock *sk; 4188 4189 rcu_read_lock(); 4190 sk_for_each_rcu(sk, &net->packet.sklist) { 4191 struct packet_sock *po = pkt_sk(sk); 4192 4193 switch (msg) { 4194 case NETDEV_UNREGISTER: 4195 if (po->mclist) 4196 packet_dev_mclist_delete(dev, &po->mclist, 4197 &mclist); 4198 fallthrough; 4199 4200 case NETDEV_DOWN: 4201 if (dev->ifindex == po->ifindex) { 4202 spin_lock(&po->bind_lock); 4203 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) { 4204 __unregister_prot_hook(sk, false); 4205 sk->sk_err = ENETDOWN; 4206 if (!sock_flag(sk, SOCK_DEAD)) 4207 sk_error_report(sk); 4208 } 4209 if (msg == NETDEV_UNREGISTER) { 4210 packet_cached_dev_reset(po); 4211 WRITE_ONCE(po->ifindex, -1); 4212 netdev_put(po->prot_hook.dev, 4213 &po->prot_hook.dev_tracker); 4214 po->prot_hook.dev = NULL; 4215 } 4216 spin_unlock(&po->bind_lock); 4217 } 4218 break; 4219 case NETDEV_UP: 4220 if (dev->ifindex == po->ifindex) { 4221 spin_lock(&po->bind_lock); 4222 if (po->num) 4223 register_prot_hook(sk); 4224 spin_unlock(&po->bind_lock); 4225 } 4226 break; 4227 } 4228 } 4229 rcu_read_unlock(); 4230 4231 /* packet_dev_mc might grab instance locks so can't run under rcu */ 4232 list_for_each_entry_safe(ml, tmp, &mclist, remove_list) { 4233 packet_dev_mc(dev, ml, -1); 4234 kfree(ml); 4235 } 4236 4237 return NOTIFY_DONE; 4238 } 4239 4240 4241 static int packet_ioctl(struct socket *sock, unsigned int cmd, 4242 unsigned long arg) 4243 { 
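	/* Only SIOCOUTQ and SIOCINQ are handled natively: the former reports
	 * bytes still charged to the send buffer, the latter the length of
	 * the next queued packet.  The legacy IPv4 interface ioctls are
	 * passed through to inet_dgram_ops for compatibility.  Illustrative
	 * use, assuming fd is an AF_PACKET socket:
	 *
	 *	int next_len;
	 *	ioctl(fd, SIOCINQ, &next_len);
	 */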
static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

static __poll_t packet_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	__poll_t mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
			TP_STATUS_KERNEL))
			mask |= EPOLLIN | EPOLLRDNORM;
	}
	packet_rcv_try_clear_pressure(po);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}

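/*
 * Illustrative user-space sketch (assumption; names are hypothetical):
 * waiting for ring frames with poll(), matching the readiness bits that
 * packet_poll() above reports for a socket with a PACKET_RX_RING attached.
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };
 *
 *	for (;;) {
 *		// Returns once at least one frame is flagged TP_STATUS_USER.
 *		if (poll(&pfd, 1, -1) < 0)
 *			break;
 *		if (pfd.revents & POLLIN)
 *			drain_ring_frames();	// hypothetical helper
 *	}
 */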
4316 */ 4317 4318 static void packet_mm_open(struct vm_area_struct *vma) 4319 { 4320 struct file *file = vma->vm_file; 4321 struct socket *sock = file->private_data; 4322 struct sock *sk = sock->sk; 4323 4324 if (sk) 4325 atomic_long_inc(&pkt_sk(sk)->mapped); 4326 } 4327 4328 static void packet_mm_close(struct vm_area_struct *vma) 4329 { 4330 struct file *file = vma->vm_file; 4331 struct socket *sock = file->private_data; 4332 struct sock *sk = sock->sk; 4333 4334 if (sk) 4335 atomic_long_dec(&pkt_sk(sk)->mapped); 4336 } 4337 4338 static const struct vm_operations_struct packet_mmap_ops = { 4339 .open = packet_mm_open, 4340 .close = packet_mm_close, 4341 }; 4342 4343 static void free_pg_vec(struct pgv *pg_vec, unsigned int order, 4344 unsigned int len) 4345 { 4346 int i; 4347 4348 for (i = 0; i < len; i++) { 4349 if (likely(pg_vec[i].buffer)) { 4350 if (is_vmalloc_addr(pg_vec[i].buffer)) 4351 vfree(pg_vec[i].buffer); 4352 else 4353 free_pages((unsigned long)pg_vec[i].buffer, 4354 order); 4355 pg_vec[i].buffer = NULL; 4356 } 4357 } 4358 kfree(pg_vec); 4359 } 4360 4361 static char *alloc_one_pg_vec_page(unsigned long order) 4362 { 4363 char *buffer; 4364 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | 4365 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; 4366 4367 buffer = (char *) __get_free_pages(gfp_flags, order); 4368 if (buffer) 4369 return buffer; 4370 4371 /* __get_free_pages failed, fall back to vmalloc */ 4372 buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); 4373 if (buffer) 4374 return buffer; 4375 4376 /* vmalloc failed, lets dig into swap here */ 4377 gfp_flags &= ~__GFP_NORETRY; 4378 buffer = (char *) __get_free_pages(gfp_flags, order); 4379 if (buffer) 4380 return buffer; 4381 4382 /* complete and utter failure */ 4383 return NULL; 4384 } 4385 4386 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) 4387 { 4388 unsigned int block_nr = req->tp_block_nr; 4389 struct pgv *pg_vec; 4390 int i; 4391 4392 pg_vec = kzalloc_objs(struct pgv, block_nr, GFP_KERNEL | __GFP_NOWARN); 4393 if (unlikely(!pg_vec)) 4394 goto out; 4395 4396 for (i = 0; i < block_nr; i++) { 4397 pg_vec[i].buffer = alloc_one_pg_vec_page(order); 4398 if (unlikely(!pg_vec[i].buffer)) 4399 goto out_free_pgvec; 4400 } 4401 4402 out: 4403 return pg_vec; 4404 4405 out_free_pgvec: 4406 free_pg_vec(pg_vec, order, block_nr); 4407 pg_vec = NULL; 4408 goto out; 4409 } 4410 4411 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, 4412 int closing, int tx_ring) 4413 { 4414 struct pgv *pg_vec = NULL; 4415 struct packet_sock *po = pkt_sk(sk); 4416 unsigned long *rx_owner_map = NULL; 4417 int was_running, order = 0; 4418 struct packet_ring_buffer *rb; 4419 struct sk_buff_head *rb_queue; 4420 __be16 num; 4421 int err; 4422 /* Added to avoid minimal code churn */ 4423 struct tpacket_req *req = &req_u->req; 4424 4425 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 4426 rb_queue = tx_ring ? 
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
			   int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long *rx_owner_map = NULL;
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;
	/* Added to avoid minimal code churn */
	struct tpacket_req *req = &req_u->req;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_long_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		unsigned int min_frame_size;

		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		min_frame_size = po->tp_hdrlen + po->tp_reserve;
		if (po->tp_version >= TPACKET_V3 &&
		    req->tp_block_size <
		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
			goto out;
		if (unlikely(req->tp_frame_size < min_frame_size))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Block transmit is not supported yet */
			if (!tx_ring) {
				init_prb_bdqc(po, rb, pg_vec, req_u);
			} else {
				struct tpacket_req3 *req3 = &req_u->req3;

				if (req3->tp_retire_blk_tov ||
				    req3->tp_sizeof_priv ||
				    req3->tp_feature_req_word) {
					err = -EINVAL;
					goto out_free_pg_vec;
				}
			}
			break;
		default:
			if (!tx_ring) {
				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
				if (!rx_owner_map)
					goto out_free_pg_vec;
			}
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
	num = po->num;
	WRITE_ONCE(po->num, 0);
	if (was_running)
		__unregister_prot_hook(sk, false);

	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_long_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		if (po->tp_version <= TPACKET_V2)
			swap(rb->rx_owner_map, rx_owner_map);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_long_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %ld\n",
			       atomic_long_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	WRITE_ONCE(po->num, num);
	if (was_running)
		register_prot_hook(sk);

	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

out_free_pg_vec:
	if (pg_vec) {
		bitmap_free(rx_owner_map);
		free_pg_vec(pg_vec, order, req->tp_block_nr);
	}
out:
	return err;
}

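/*
 * Illustrative user-space sketch (assumption, not taken from this file):
 * requesting a TPACKET_V3 receive ring that passes the sanity checks in
 * packet_set_ring() above. "fd" is assumed to be an AF_PACKET socket and
 * the sizes are arbitrary examples.
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 22,	// page aligned
 *		.tp_block_nr	   = 64,
 *		.tp_frame_size	   = 1 << 11,
 *		.tp_frame_nr	   = ((1 << 22) / (1 << 11)) * 64,
 *		.tp_retire_blk_tov = 60,	// block timeout in ms, rx only
 *	};
 *	int ver = TPACKET_V3;
 *
 *	// The version must be selected before the ring is created.
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */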
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_long_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

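/*
 * Illustrative user-space sketch (assumption, continuing the hypothetical
 * TPACKET_V3 setup above): mapping the ring that packet_mmap() exposes.
 * The rx and tx rings share a single mapping, laid out back to back, so
 * the requested length must equal the combined size of both rings and
 * the offset must be zero.
 *
 *	size_t len = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	if (ring == MAP_FAILED)
 *		exit(1);
 *
 *	// Block i of the rx ring; once the kernel flips block_status to
 *	// TP_STATUS_USER, walk its frames and then hand the block back
 *	// by writing TP_STATUS_KERNEL.
 *	struct tpacket_block_desc *pbd =
 *		(void *)((char *)ring + (size_t)i * req.tp_block_size);
 *	if (pbd->hdr.bh1.block_status & TP_STATUS_USER)
 *		process_block(pbd);	// hypothetical helper
 */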
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt_iter = packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6llu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(READ_ONCE(po->num)),
			   READ_ONCE(po->ifindex),
			   packet_sock_flag(po, PACKET_SOCK_RUNNING),
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sk_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
}

static int __init packet_init(void)
{
	int rc;

	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;
	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out_notifier;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;

	return 0;

out_proto:
	proto_unregister(&packet_proto);
out_notifier:
	unregister_netdevice_notifier(&packet_netdev_notifier);
out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_DESCRIPTION("Packet socket support (AF_PACKET)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);