// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/dim.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GUEST_USO4,
	VIRTIO_NET_F_GUEST_USO6,
	VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_tx_drops;
	u64_stats_t kicks;
	u64_stats_t tx_timeouts;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t drops;
	u64_stats_t xdp_packets;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_redirects;
	u64_stats_t xdp_drops;
	u64_stats_t kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

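/* The tables below pair each ethtool stats string with the offset of the
 * matching u64_stats_t field, so counters can be aggregated generically by
 * offset (see the per-queue fold in virtnet_receive()).
 */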
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

struct virtnet_interrupt_coalesce {
	u32 max_packets;
	u32 max_usecs;
};

/* The dma information of pages allocated at a time. */
struct virtnet_rq_dma {
	dma_addr_t addr;
	u32 ref;
	u16 len;
	u16 need_sync;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[16];

	struct virtnet_sq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	struct napi_struct napi;

	/* Record whether sq is in reset state. */
	bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* The number of rx notifications */
	u16 calls;

	/* Is dynamic interrupt moderation enabled? */
	bool dim_enabled;

	/* Dynamic Interrupt Moderation */
	struct dim dim;

	u32 packets_in_napi;

	struct virtnet_interrupt_coalesce intr_coal;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[16];

	struct xdp_rxq_info xdp_rxq;

	/* Record the last dma info so it can be freed after new pages are
	 * allocated.
	 */
	struct virtnet_rq_dma *last_dma;

	/* Do the DMA mapping ourselves */
	bool do_dma;
};

/* This structure can contain the RSS message with maximum settings for the
 * indirection table and key size.
 * Note that the default structure describing the RSS configuration,
 * virtio_net_rss_config, contains the same info but can't handle the table
 * values.
 * In any case, the structure is passed to the virtio hw through sg_buf split
 * into parts, because table sizes may differ according to the device
 * configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE	40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN	128
struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
	struct virtio_net_ctrl_coal_tx coal_tx;
	struct virtio_net_ctrl_coal_rx coal_rx;
	struct virtio_net_ctrl_coal_vq coal_vq;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Is rx dynamic interrupt moderation enabled? */
	bool rx_dim_enabled;

	/* Interrupt coalescing settings */
	struct virtnet_interrupt_coalesce intr_coal_tx;
	struct virtnet_interrupt_coalesce intr_coal_rx;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

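/* Alignment arithmetic for the padded header below (illustrative, assuming
 * struct virtio_net_hdr_v1_hash is 20 bytes): 20 + 12 bytes of padding = 32,
 * so the data sg that follows the header sg starts 16-byte aligned.
 */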
struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[12];
};

struct virtio_net_common_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
		struct virtio_net_hdr_v1_hash hash_v1_hdr;
	};
};

static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_common_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = true;
	spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = false;
	spin_unlock_bh(&vi->refill_lock);
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static bool virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
		else
			return true;
	} else {
		virtqueue_disable_cb(vq);
	}

	return false;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

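/* For mergeable buffers the per-buffer context is a pointer-sized scalar
 * packing two values: truesize in the low 22 bits and the headroom above
 * them. Worked example (illustrative): truesize 1536 with headroom 256
 * encodes as (256 << 22) | 1536; the helpers below pack and unpack the
 * two fields.
 */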
#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(buf, buflen);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

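/* Sketch of the receive copy strategy implemented next: when the frame is
 * large and the page has room for a shared-info tail, the page itself
 * becomes the skb head (no copy); otherwise up to GOOD_COPY_LEN bytes are
 * copied into a fresh skb and the rest of the page (chain) is attached as
 * frags.
 */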
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* If the packet is large enough and there is tailroom for the shared
	 * info, build the skb around the existing page so no copy is needed.
	 */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = virtnet_build_skb(buf, truesize, p - buf, len);
		if (unlikely(!skb))
			return NULL;

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy all frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

ok:
	hdr = skb_vnet_common_hdr(skb);
	memcpy(hdr, hdr_p, hdr_len);
	if (page_to_free)
		put_page(page_to_free);

	return skb;
}

static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
	struct page *page = virt_to_head_page(buf);
	struct virtnet_rq_dma *dma;
	void *head;
	int offset;

	head = page_address(page);

	dma = head;

	--dma->ref;

	if (dma->need_sync && len) {
		offset = buf - (head + sizeof(*dma));

		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
							offset, len,
							DMA_FROM_DEVICE);
	}

	if (dma->ref)
		return;

	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	put_page(page);
}

static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
	void *buf;

	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
	if (buf && rq->do_dma)
		virtnet_rq_unmap(rq, buf, *len);

	return buf;
}

static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
{
	void *buf;

	buf = virtqueue_detach_unused_buf(rq->vq);
	if (buf && rq->do_dma)
		virtnet_rq_unmap(rq, buf, 0);

	return buf;
}

static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
	struct virtnet_rq_dma *dma;
	dma_addr_t addr;
	u32 offset;
	void *head;

	if (!rq->do_dma) {
		sg_init_one(rq->sg, buf, len);
		return;
	}

	head = page_address(rq->alloc_frag.page);

	offset = buf - head;

	dma = head;

	addr = dma->addr - sizeof(*dma) + offset;

	sg_init_table(rq->sg, 1);
	rq->sg[0].dma_address = addr;
	rq->sg[0].length = len;
}

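/* Layout of a pre-mapped page frag as carved up by virtnet_rq_alloc()
 * (sketch):
 *
 *	+-----------------------+------+------+-----+
 *	| struct virtnet_rq_dma | buf0 | buf1 | ... |
 *	+-----------------------+------+------+-----+
 *
 * The dma header at the page start records the mapping for the whole page;
 * dma->ref counts the buffers carved out of it, and the mapping is torn
 * down only when the count drops to zero in virtnet_rq_unmap().
 */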
static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	struct virtnet_rq_dma *dma;
	void *buf, *head;
	dma_addr_t addr;

	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
		return NULL;

	head = page_address(alloc_frag->page);

	if (rq->do_dma) {
		dma = head;

		/* new pages */
		if (!alloc_frag->offset) {
			if (rq->last_dma) {
				/* Now, the new page is allocated, the last dma
				 * will not be used. So the dma can be unmapped
				 * if the ref is 0.
				 */
				virtnet_rq_unmap(rq, rq->last_dma, 0);
				rq->last_dma = NULL;
			}

			dma->len = alloc_frag->size - sizeof(*dma);

			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
							      dma->len, DMA_FROM_DEVICE, 0);
			if (virtqueue_dma_mapping_error(rq->vq, addr))
				return NULL;

			dma->addr = addr;
			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);

			/* Add a reference to dma to prevent the entire dma from
			 * being released during error handling. This reference
			 * will be freed after the pages are no longer used.
			 */
			get_page(alloc_frag->page);
			dma->ref = 1;
			alloc_frag->offset = sizeof(*dma);

			rq->last_dma = dma;
		}

		++dma->ref;
	}

	buf = head + alloc_frag->offset;

	get_page(alloc_frag->page);
	alloc_frag->offset += size;

	return buf;
}

static void virtnet_rq_set_premapped(struct virtnet_info *vi)
{
	int i;

	/* disable for big mode */
	if (!vi->mergeable_rx_bufs && vi->big_packets)
		return;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
			continue;

		vi->rq[i].do_dma = true;
	}
}

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(!is_xdp_frame(ptr))) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		}
		packets++;
	}

	/* Avoid overhead when no packets have been processed; this happens
	 * when we are called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, bytes);
	u64_stats_add(&sq->stats.packets, packets);
	u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

static void check_sq_full_and_disable(struct virtnet_info *vi,
				      struct net_device *dev,
				      struct send_queue *sq)
{
	bool use_napi = sq->napi.weight;
	int qnum;

	qnum = sq - vi->sq;

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (use_napi) {
			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
				virtqueue_napi_schedule(&sq->napi, sq->vq);
		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* In the wrapping function virtnet_xdp_xmit(), we need to free
	 * up the pending old buffers, where we need to calculate the
	 * position of skb_shared_info in xdp_get_frame_len() and
	 * xdp_return_frame(), which will involve xdpf->data and
	 * xdpf->headroom. Therefore, we need to update the value of
	 * headroom synchronously here.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
				   xdp_to_ptr(xdpf), GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({					\
	int cpu = smp_processor_id();					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
	unsigned int qp;						\
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {				\
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;		\
		qp += cpu;						\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_acquire(txq);				\
	} else {							\
		qp = cpu % v->curr_queue_pairs;				\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_lock(txq, cpu);				\
	}								\
	v->sq + qp;							\
})

#define virtnet_xdp_put_sq(vi, q) {					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);			\
	if (v->curr_queue_pairs > nr_cpu_ids)				\
		__netif_tx_release(txq);				\
	else								\
		__netif_tx_unlock(txq);					\
}

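/* Typical pairing of the two helpers above (a sketch of what
 * virtnet_xdp_xmit() does next):
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	... add frames to sq->vq and optionally kick ...
 *	virtnet_xdp_put_sq(vi, sq);
 */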
static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
	void *ptr;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
			break;
		nxmit++;
	}
	ret = nxmit;

	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
		check_sq_full_and_disable(vi, dev, sq);

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, bytes);
	u64_stats_add(&sq->stats.packets, packets);
	u64_stats_add(&sq->stats.xdp_tx, n);
	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
	u64_stats_add(&sq->stats.kicks, kicks);
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}

static void put_xdp_frags(struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	struct page *xdp_page;
	int i;

	if (xdp_buff_has_frags(xdp)) {
		shinfo = xdp_get_shared_info_from_buff(xdp);
		for (i = 0; i < shinfo->nr_frags; i++) {
			xdp_page = skb_frag_page(&shinfo->frags[i]);
			put_page(xdp_page);
		}
	}
}

static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
			       unsigned int *xdp_xmit,
			       struct virtnet_rq_stats *stats)
{
	struct xdp_frame *xdpf;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	u64_stats_inc(&stats->xdp_packets);

	switch (act) {
	case XDP_PASS:
		return act;

	case XDP_TX:
		u64_stats_inc(&stats->xdp_tx);
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
			return XDP_DROP;
		}

		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
		if (unlikely(!err)) {
			xdp_return_frame_rx_napi(xdpf);
		} else if (unlikely(err < 0)) {
			trace_xdp_exception(dev, xdp_prog, act);
			return XDP_DROP;
		}
		*xdp_xmit |= VIRTIO_XDP_TX;
		return act;

	case XDP_REDIRECT:
		u64_stats_inc(&stats->xdp_redirects);
		err = xdp_do_redirect(dev, xdp, xdp_prog);
		if (err)
			return XDP_DROP;

		*xdp_xmit |= VIRTIO_XDP_REDIR;
		return act;

	default:
		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return XDP_DROP;
	}
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until the queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       int *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page;

	if (page_off + *len + tailroom > PAGE_SIZE)
		return NULL;

	page = alloc_page(GFP_ATOMIC);
	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
					       unsigned int xdp_headroom,
					       void *buf,
					       unsigned int len)
{
	unsigned int header_offset;
	unsigned int headroom;
	unsigned int buflen;
	struct sk_buff *skb;

	header_offset = VIRTNET_RX_PAD + xdp_headroom;
	headroom = vi->hdr_len + header_offset;
	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	skb = virtnet_build_skb(buf, buflen, headroom, len);
	if (unlikely(!skb))
		return NULL;

	buf += header_offset;
	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);

	return skb;
}

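/* Layout of a small-mode receive buffer (sketch):
 *
 *	buf .. VIRTNET_RX_PAD .. xdp_headroom .. vnet hdr .. packet data
 *
 * header_offset = VIRTNET_RX_PAD + xdp_headroom, and the total headroom
 * adds vi->hdr_len on top, which is why buflen reserves room for both plus
 * the trailing skb_shared_info.
 */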
static struct sk_buff *receive_small_xdp(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 struct bpf_prog *xdp_prog,
					 void *buf,
					 unsigned int xdp_headroom,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
	struct page *page = virt_to_head_page(buf);
	struct page *xdp_page;
	unsigned int buflen;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	unsigned int metasize = 0;
	u32 act;

	if (unlikely(hdr->hdr.gso_type))
		goto err_xdp;

	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
		int offset = buf - page_address(page) + header_offset;
		unsigned int tlen = len + vi->hdr_len;
		int num_buf = 1;

		xdp_headroom = virtnet_get_headroom(vi);
		header_offset = VIRTNET_RX_PAD + xdp_headroom;
		headroom = vi->hdr_len + header_offset;
		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		xdp_page = xdp_linearize_page(rq, &num_buf, page,
					      offset, header_offset,
					      &tlen);
		if (!xdp_page)
			goto err_xdp;

		buf = page_address(xdp_page);
		put_page(page);
		page = xdp_page;
	}

	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
			 xdp_headroom, len, true);

	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		/* Recalculate length in case bpf program changed it */
		len = xdp.data_end - xdp.data;
		metasize = xdp.data - xdp.data_meta;
		break;

	case XDP_TX:
	case XDP_REDIRECT:
		goto xdp_xmit;

	default:
		goto err_xdp;
	}

	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
	if (unlikely(!skb))
		goto err;

	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;

err_xdp:
	u64_stats_inc(&stats->xdp_drops);
err:
	u64_stats_inc(&stats->drops);
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	unsigned int xdp_headroom = (unsigned long)ctx;
	struct page *page = virt_to_head_page(buf);
	struct sk_buff *skb;

	len -= vi->hdr_len;
	u64_stats_add(&stats->bytes, len);

	if (unlikely(len > GOOD_PACKET_LEN)) {
		pr_debug("%s: rx error: len %u exceeds max size %d\n",
			 dev->name, len, GOOD_PACKET_LEN);
		DEV_STATS_INC(dev, rx_length_errors);
		goto err;
	}

	if (unlikely(vi->xdp_enabled)) {
		struct bpf_prog *xdp_prog;

		rcu_read_lock();
		xdp_prog = rcu_dereference(rq->xdp_prog);
		if (xdp_prog) {
			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
						xdp_headroom, len, xdp_xmit,
						stats);
			rcu_read_unlock();
			return skb;
		}
		rcu_read_unlock();
	}

	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
	if (likely(skb))
		return skb;

err:
	u64_stats_inc(&stats->drops);
	put_page(page);
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb =
		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);

	u64_stats_add(&stats->bytes, len - vi->hdr_len);
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	u64_stats_inc(&stats->drops);
	give_pages(rq, page);
	return NULL;
}

static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
			       struct net_device *dev,
			       struct virtnet_rq_stats *stats)
{
	struct page *page;
	void *buf;
	int len;

	while (num_buf-- > 1) {
		buf = virtnet_rq_get_buf(rq, &len, NULL);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			DEV_STATS_INC(dev, rx_length_errors);
			break;
		}
		u64_stats_add(&stats->bytes, len);
		page = virt_to_head_page(buf);
		put_page(page);
	}
}

/* Why not use xdp_build_skb_from_frame()?
 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
 * virtio-net there are 2 points that do not match its requirements:
 * 1. The size of the prefilled buffer is not fixed before xdp is set.
 * 2. xdp_build_skb_from_frame() does more checks that we don't need,
 *    like eth_type_trans() (which virtio-net does in receive_buf()).
 */
static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
					       struct virtnet_info *vi,
					       struct xdp_buff *xdp,
					       unsigned int xdp_frags_truesz)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	unsigned int headroom, data_len;
	struct sk_buff *skb;
	int metasize;
	u8 nr_frags;

	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		pr_debug("Error building skb as missing reserved tailroom for xdp");
		return NULL;
	}

	if (unlikely(xdp_buff_has_frags(xdp)))
		nr_frags = sinfo->nr_frags;

	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	headroom = xdp->data - xdp->data_hard_start;
	data_len = xdp->data_end - xdp->data;
	skb_reserve(skb, headroom);
	__skb_put(skb, data_len);

	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (metasize)
		skb_metadata_set(skb, metasize);

	if (unlikely(xdp_buff_has_frags(xdp)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   xdp_frags_truesz,
					   xdp_buff_is_frag_pfmemalloc(xdp));

	return skb;
}

/* TODO: build xdp in big mode */
static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
				      struct virtnet_info *vi,
				      struct receive_queue *rq,
				      struct xdp_buff *xdp,
				      void *buf,
				      unsigned int len,
				      unsigned int frame_sz,
				      int *num_buf,
				      unsigned int *xdp_frags_truesize,
				      struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	unsigned int headroom, tailroom, room;
	unsigned int truesize, cur_frag_size;
	struct skb_shared_info *shinfo;
	unsigned int xdp_frags_truesz = 0;
	struct page *page;
	skb_frag_t *frag;
	int offset;
	void *ctx;

	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);

	if (!*num_buf)
		return 0;

	if (*num_buf > 1) {
		/* If we want to build multi-buffer xdp, we need
		 * to specify that the flags of xdp_buff have the
		 * XDP_FLAGS_HAS_FRAG bit.
		 */
		if (!xdp_buff_has_frags(xdp))
			xdp_buff_set_frags_flag(xdp);

		shinfo = xdp_get_shared_info_from_buff(xdp);
		shinfo->nr_frags = 0;
		shinfo->xdp_frags_size = 0;
	}

	if (*num_buf > MAX_SKB_FRAGS + 1)
		return -EINVAL;

	while (--*num_buf > 0) {
		buf = virtnet_rq_get_buf(rq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, *num_buf,
				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
			DEV_STATS_INC(dev, rx_length_errors);
			goto err;
		}

		u64_stats_add(&stats->bytes, len);
		page = virt_to_head_page(buf);
		offset = buf - page_address(page);

		truesize = mergeable_ctx_to_truesize(ctx);
		headroom = mergeable_ctx_to_headroom(ctx);
		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
		room = SKB_DATA_ALIGN(headroom + tailroom);

		cur_frag_size = truesize;
		xdp_frags_truesz += cur_frag_size;
		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
			put_page(page);
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)(truesize - room));
			DEV_STATS_INC(dev, rx_length_errors);
			goto err;
		}

		frag = &shinfo->frags[shinfo->nr_frags++];
		skb_frag_fill_page_desc(frag, page, offset, len);
		if (page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		shinfo->xdp_frags_size += len;
	}

	*xdp_frags_truesize = xdp_frags_truesz;
	return 0;

err:
	put_xdp_frags(xdp);
	return -EINVAL;
}

static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct bpf_prog *xdp_prog,
				   void *ctx,
				   unsigned int *frame_sz,
				   int *num_buf,
				   struct page **page,
				   int offset,
				   unsigned int *len,
				   struct virtio_net_hdr_mrg_rxbuf *hdr)
{
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	struct page *xdp_page;
	unsigned int xdp_room;

	/* Transient failure which in theory could occur if
	 * in-flight packets from before XDP was enabled reach
	 * the receive path after XDP is loaded.
	 */
	if (unlikely(hdr->hdr.gso_type))
		return NULL;

	/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
	 * with headroom may add a hole in truesize, which
	 * makes their length exceed PAGE_SIZE. So we disable the
	 * hole mechanism for xdp. See add_recvbuf_mergeable().
	 */
	*frame_sz = truesize;

	if (likely(headroom >= virtnet_get_headroom(vi) &&
		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
		return page_address(*page) + offset;
	}

	/* This happens when headroom is not enough because
	 * the buffer was prefilled before XDP was set.
	 * This should only happen for the first several packets.
	 * In fact, vq reset can be used here to help us clean up
	 * the prefilled buffers, but many existing devices do not
	 * support it, and we don't want to bother users who are
	 * using xdp normally.
	 */
	if (!xdp_prog->aux->xdp_has_frags) {
		/* linearize data for XDP */
		xdp_page = xdp_linearize_page(rq, num_buf,
					      *page, offset,
					      VIRTIO_XDP_HEADROOM,
					      len);
		if (!xdp_page)
			return NULL;
	} else {
		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
					  sizeof(struct skb_shared_info));
		if (*len + xdp_room > PAGE_SIZE)
			return NULL;

		xdp_page = alloc_page(GFP_ATOMIC);
		if (!xdp_page)
			return NULL;

		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
		       page_address(*page) + offset, *len);
	}

	*frame_sz = PAGE_SIZE;

	put_page(*page);

	*page = xdp_page;

	return page_address(*page) + VIRTIO_XDP_HEADROOM;
}

static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
					     struct virtnet_info *vi,
					     struct receive_queue *rq,
					     struct bpf_prog *xdp_prog,
					     void *buf,
					     void *ctx,
					     unsigned int len,
					     unsigned int *xdp_xmit,
					     struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	unsigned int xdp_frags_truesz = 0;
	struct sk_buff *head_skb;
	unsigned int frame_sz;
	struct xdp_buff xdp;
	void *data;
	u32 act;
	int err;

	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
				     offset, &len, hdr);
	if (unlikely(!data))
		goto err_xdp;

	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
					 &num_buf, &xdp_frags_truesz, stats);
	if (unlikely(err))
		goto err_xdp;

	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
		if (unlikely(!head_skb))
			break;
		return head_skb;

	case XDP_TX:
	case XDP_REDIRECT:
		return NULL;

	default:
		break;
	}

	put_xdp_frags(&xdp);

err_xdp:
	put_page(page);
	mergeable_buf_free(rq, num_buf, dev, stats);

	u64_stats_inc(&stats->xdp_drops);
	u64_stats_inc(&stats->drops);
	return NULL;
}

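/* Mergeable receive path (sketch): the first buffer carries the vnet header,
 * whose num_buffers field says how many descriptors the device used for this
 * frame; the loop below pulls the remaining buffers off the ring and hangs
 * them off the head skb as frags (or a frag_list once MAX_SKB_FRAGS is hit),
 * then feeds the final length into the EWMA used to size future buffers.
 */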
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);

	head_skb = NULL;
	u64_stats_add(&stats->bytes, len - vi->hdr_len);

	if (unlikely(len > truesize - room)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)(truesize - room));
		DEV_STATS_INC(dev, rx_length_errors);
		goto err_skb;
	}

	if (unlikely(vi->xdp_enabled)) {
		struct bpf_prog *xdp_prog;

		rcu_read_lock();
		xdp_prog = rcu_dereference(rq->xdp_prog);
		if (xdp_prog) {
			head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
							 len, xdp_xmit, stats);
			rcu_read_unlock();
			return head_skb;
		}
		rcu_read_unlock();
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtnet_rq_get_buf(rq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			DEV_STATS_INC(dev, rx_length_errors);
			goto err_buf;
		}

		u64_stats_add(&stats->bytes, len);
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		headroom = mergeable_ctx_to_headroom(ctx);
		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
		room = SKB_DATA_ALIGN(headroom + tailroom);
		if (unlikely(len > truesize - room)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)(truesize - room));
			DEV_STATS_INC(dev, rx_length_errors);
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_skb:
	put_page(page);
	mergeable_buf_free(rq, num_buf, dev, stats);

err_buf:
	u64_stats_inc(&stats->drops);
	dev_kfree_skb(head_skb);
	return NULL;
}

static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
				struct sk_buff *skb)
{
	enum pkt_hash_types rss_hash_type;

	if (!hdr_hash || !skb)
		return;

	switch (__le16_to_cpu(hdr_hash->hash_report)) {
	case VIRTIO_NET_HASH_REPORT_TCPv4:
	case VIRTIO_NET_HASH_REPORT_UDPv4:
	case VIRTIO_NET_HASH_REPORT_TCPv6:
	case VIRTIO_NET_HASH_REPORT_UDPv6:
	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
		rss_hash_type = PKT_HASH_TYPE_L4;
		break;
	case VIRTIO_NET_HASH_REPORT_IPv4:
	case VIRTIO_NET_HASH_REPORT_IPv6:
	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
		rss_hash_type = PKT_HASH_TYPE_L3;
		break;
	case VIRTIO_NET_HASH_REPORT_NONE:
	default:
		rss_hash_type = PKT_HASH_TYPE_NONE;
	}
	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		DEV_STATS_INC(dev, rx_length_errors);
		virtnet_rq_free_unused_buf(rq->vq, buf);
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_common_hdr(skb);
	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	DEV_STATS_INC(dev, rx_frame_errors);
	dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers are allocated with the same size,
 * except for the headroom. For this reason we do not need to use
 * mergeable_len_to_ctx here - it is enough to store the headroom as the
 * context, ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	buf = virtnet_rq_alloc(rq, len, gfp);
	if (unlikely(!buf))
		return -ENOMEM;

	virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
			       vi->hdr_len + GOOD_PACKET_LEN);

	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0) {
		if (rq->do_dma)
			virtnet_rq_unmap(rq, buf, 0);
		put_page(virt_to_head_page(buf));
	}

	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);

	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	const size_t hdr_len = vi->hdr_len;
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}

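/* Sizing sketch for the mergeable path (illustrative, assuming 4096-byte
 * pages, a 12-byte header and a 64-byte L1_CACHE_BYTES): with an EWMA packet
 * length of ~1500, get_mergeable_buf_len() returns ALIGN(12 + 1500, 64) =
 * 1536, so a couple of buffers are carved from each page frag; when XDP
 * headroom is in play (room != 0), a full PAGE_SIZE - room buffer is used
 * instead.
 */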
static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	unsigned int len, hole;
	void *ctx;
	char *buf;
	int err;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but since we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);

	buf = virtnet_rq_alloc(rq, len + room, gfp);
	if (unlikely(!buf))
		return -ENOMEM;

	buf += headroom; /* advance address leaving hole at front of pkt */
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 * XDP core assumes that frame_size of xdp_buff and the length
		 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
		 */
		if (!headroom)
			len += hole;
		alloc_frag->offset += hole;
	}

	virtnet_rq_init_one_sg(rq, buf, len);

	ctx = mergeable_len_to_ctx(len + room, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0) {
		if (rq->do_dma)
			virtnet_rq_unmap(rq, buf, 0);
		put_page(virt_to_head_page(buf));
	}

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
		u64_stats_inc(&rq->stats.kicks);
		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
	}

	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	rq->calls++;
	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets now.
	 * Call local_bh_enable after to trigger softIRQ processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

2045 */ 2046 if (!vi->affinity_hint_set) { 2047 napi->weight = 0; 2048 return; 2049 } 2050 2051 return virtnet_napi_enable(vq, napi); 2052 } 2053 2054 static void virtnet_napi_tx_disable(struct napi_struct *napi) 2055 { 2056 if (napi->weight) 2057 napi_disable(napi); 2058 } 2059 2060 static void refill_work(struct work_struct *work) 2061 { 2062 struct virtnet_info *vi = 2063 container_of(work, struct virtnet_info, refill.work); 2064 bool still_empty; 2065 int i; 2066 2067 for (i = 0; i < vi->curr_queue_pairs; i++) { 2068 struct receive_queue *rq = &vi->rq[i]; 2069 2070 napi_disable(&rq->napi); 2071 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); 2072 virtnet_napi_enable(rq->vq, &rq->napi); 2073 2074 /* In theory, this can happen: if we don't get any buffers in 2075 * we will *never* try to fill again. 2076 */ 2077 if (still_empty) 2078 schedule_delayed_work(&vi->refill, HZ/2); 2079 } 2080 } 2081 2082 static int virtnet_receive(struct receive_queue *rq, int budget, 2083 unsigned int *xdp_xmit) 2084 { 2085 struct virtnet_info *vi = rq->vq->vdev->priv; 2086 struct virtnet_rq_stats stats = {}; 2087 unsigned int len; 2088 int packets = 0; 2089 void *buf; 2090 int i; 2091 2092 if (!vi->big_packets || vi->mergeable_rx_bufs) { 2093 void *ctx; 2094 2095 while (packets < budget && 2096 (buf = virtnet_rq_get_buf(rq, &len, &ctx))) { 2097 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); 2098 packets++; 2099 } 2100 } else { 2101 while (packets < budget && 2102 (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) { 2103 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); 2104 packets++; 2105 } 2106 } 2107 2108 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { 2109 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { 2110 spin_lock(&vi->refill_lock); 2111 if (vi->refill_enabled) 2112 schedule_delayed_work(&vi->refill, 0); 2113 spin_unlock(&vi->refill_lock); 2114 } 2115 } 2116 2117 u64_stats_set(&stats.packets, packets); 2118 u64_stats_update_begin(&rq->stats.syncp); 2119 for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { 2120 size_t offset = virtnet_rq_stats_desc[i].offset; 2121 u64_stats_t *item, *src; 2122 2123 item = (u64_stats_t *)((u8 *)&rq->stats + offset); 2124 src = (u64_stats_t *)((u8 *)&stats + offset); 2125 u64_stats_add(item, u64_stats_read(src)); 2126 } 2127 u64_stats_update_end(&rq->stats.syncp); 2128 2129 return packets; 2130 } 2131 2132 static void virtnet_poll_cleantx(struct receive_queue *rq) 2133 { 2134 struct virtnet_info *vi = rq->vq->vdev->priv; 2135 unsigned int index = vq2rxq(rq->vq); 2136 struct send_queue *sq = &vi->sq[index]; 2137 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 2138 2139 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) 2140 return; 2141 2142 if (__netif_tx_trylock(txq)) { 2143 if (sq->reset) { 2144 __netif_tx_unlock(txq); 2145 return; 2146 } 2147 2148 do { 2149 virtqueue_disable_cb(sq->vq); 2150 free_old_xmit_skbs(sq, true); 2151 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 2152 2153 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 2154 netif_tx_wake_queue(txq); 2155 2156 __netif_tx_unlock(txq); 2157 } 2158 } 2159 2160 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq) 2161 { 2162 struct dim_sample cur_sample = {}; 2163 2164 if (!rq->packets_in_napi) 2165 return; 2166 2167 u64_stats_update_begin(&rq->stats.syncp); 2168 dim_update_sample(rq->calls, 2169 u64_stats_read(&rq->stats.packets), 2170 u64_stats_read(&rq->stats.bytes), 2171 &cur_sample); 2172 
u64_stats_update_end(&rq->stats.syncp); 2173 2174 net_dim(&rq->dim, cur_sample); 2175 rq->packets_in_napi = 0; 2176 } 2177 2178 static int virtnet_poll(struct napi_struct *napi, int budget) 2179 { 2180 struct receive_queue *rq = 2181 container_of(napi, struct receive_queue, napi); 2182 struct virtnet_info *vi = rq->vq->vdev->priv; 2183 struct send_queue *sq; 2184 unsigned int received; 2185 unsigned int xdp_xmit = 0; 2186 bool napi_complete; 2187 2188 virtnet_poll_cleantx(rq); 2189 2190 received = virtnet_receive(rq, budget, &xdp_xmit); 2191 rq->packets_in_napi += received; 2192 2193 if (xdp_xmit & VIRTIO_XDP_REDIR) 2194 xdp_do_flush(); 2195 2196 /* Out of packets? */ 2197 if (received < budget) { 2198 napi_complete = virtqueue_napi_complete(napi, rq->vq, received); 2199 if (napi_complete && rq->dim_enabled) 2200 virtnet_rx_dim_update(vi, rq); 2201 } 2202 2203 if (xdp_xmit & VIRTIO_XDP_TX) { 2204 sq = virtnet_xdp_get_sq(vi); 2205 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 2206 u64_stats_update_begin(&sq->stats.syncp); 2207 u64_stats_inc(&sq->stats.kicks); 2208 u64_stats_update_end(&sq->stats.syncp); 2209 } 2210 virtnet_xdp_put_sq(vi, sq); 2211 } 2212 2213 return received; 2214 } 2215 2216 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) 2217 { 2218 virtnet_napi_tx_disable(&vi->sq[qp_index].napi); 2219 napi_disable(&vi->rq[qp_index].napi); 2220 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); 2221 } 2222 2223 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) 2224 { 2225 struct net_device *dev = vi->dev; 2226 int err; 2227 2228 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, 2229 vi->rq[qp_index].napi.napi_id); 2230 if (err < 0) 2231 return err; 2232 2233 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, 2234 MEM_TYPE_PAGE_SHARED, NULL); 2235 if (err < 0) 2236 goto err_xdp_reg_mem_model; 2237 2238 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); 2239 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); 2240 2241 return 0; 2242 2243 err_xdp_reg_mem_model: 2244 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); 2245 return err; 2246 } 2247 2248 static int virtnet_open(struct net_device *dev) 2249 { 2250 struct virtnet_info *vi = netdev_priv(dev); 2251 int i, err; 2252 2253 enable_delayed_refill(vi); 2254 2255 for (i = 0; i < vi->max_queue_pairs; i++) { 2256 if (i < vi->curr_queue_pairs) 2257 /* Make sure we have some buffers: if oom use wq. 
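 * (refill_work retries with GFP_KERNEL and reschedules itself every
 * HZ/2 until the ring is no longer empty).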
*/ 2258 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 2259 schedule_delayed_work(&vi->refill, 0); 2260 2261 err = virtnet_enable_queue_pair(vi, i); 2262 if (err < 0) 2263 goto err_enable_qp; 2264 } 2265 2266 return 0; 2267 2268 err_enable_qp: 2269 disable_delayed_refill(vi); 2270 cancel_delayed_work_sync(&vi->refill); 2271 2272 for (i--; i >= 0; i--) { 2273 virtnet_disable_queue_pair(vi, i); 2274 cancel_work_sync(&vi->rq[i].dim.work); 2275 } 2276 2277 return err; 2278 } 2279 2280 static int virtnet_poll_tx(struct napi_struct *napi, int budget) 2281 { 2282 struct send_queue *sq = container_of(napi, struct send_queue, napi); 2283 struct virtnet_info *vi = sq->vq->vdev->priv; 2284 unsigned int index = vq2txq(sq->vq); 2285 struct netdev_queue *txq; 2286 int opaque; 2287 bool done; 2288 2289 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { 2290 /* We don't need to enable cb for XDP */ 2291 napi_complete_done(napi, 0); 2292 return 0; 2293 } 2294 2295 txq = netdev_get_tx_queue(vi->dev, index); 2296 __netif_tx_lock(txq, raw_smp_processor_id()); 2297 virtqueue_disable_cb(sq->vq); 2298 free_old_xmit_skbs(sq, true); 2299 2300 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) 2301 netif_tx_wake_queue(txq); 2302 2303 opaque = virtqueue_enable_cb_prepare(sq->vq); 2304 2305 done = napi_complete_done(napi, 0); 2306 2307 if (!done) 2308 virtqueue_disable_cb(sq->vq); 2309 2310 __netif_tx_unlock(txq); 2311 2312 if (done) { 2313 if (unlikely(virtqueue_poll(sq->vq, opaque))) { 2314 if (napi_schedule_prep(napi)) { 2315 __netif_tx_lock(txq, raw_smp_processor_id()); 2316 virtqueue_disable_cb(sq->vq); 2317 __netif_tx_unlock(txq); 2318 __napi_schedule(napi); 2319 } 2320 } 2321 } 2322 2323 return 0; 2324 } 2325 2326 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) 2327 { 2328 struct virtio_net_hdr_mrg_rxbuf *hdr; 2329 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 2330 struct virtnet_info *vi = sq->vq->vdev->priv; 2331 int num_sg; 2332 unsigned hdr_len = vi->hdr_len; 2333 bool can_push; 2334 2335 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 2336 2337 can_push = vi->any_header_sg && 2338 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 2339 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; 2340 /* Even if we can, don't push here yet as this would skew 2341 * csum_start offset below. */ 2342 if (can_push) 2343 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); 2344 else 2345 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr; 2346 2347 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 2348 virtio_is_little_endian(vi->vdev), false, 2349 0)) 2350 return -EPROTO; 2351 2352 if (vi->mergeable_rx_bufs) 2353 hdr->num_buffers = 0; 2354 2355 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); 2356 if (can_push) { 2357 __skb_push(skb, hdr_len); 2358 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 2359 if (unlikely(num_sg < 0)) 2360 return num_sg; 2361 /* Pull header back to avoid skew in tx bytes calculations. 
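 * (the header was pushed only so skb_to_sgvec() would cover it in the
 * same sg entry as the linear data; skb->len must go back to counting
 * payload bytes only).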
*/ 2362 __skb_pull(skb, hdr_len); 2363 } else { 2364 sg_set_buf(sq->sg, hdr, hdr_len); 2365 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 2366 if (unlikely(num_sg < 0)) 2367 return num_sg; 2368 num_sg++; 2369 } 2370 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); 2371 } 2372 2373 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 2374 { 2375 struct virtnet_info *vi = netdev_priv(dev); 2376 int qnum = skb_get_queue_mapping(skb); 2377 struct send_queue *sq = &vi->sq[qnum]; 2378 int err; 2379 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); 2380 bool kick = !netdev_xmit_more(); 2381 bool use_napi = sq->napi.weight; 2382 2383 /* Free up any pending old buffers before queueing new ones. */ 2384 do { 2385 if (use_napi) 2386 virtqueue_disable_cb(sq->vq); 2387 2388 free_old_xmit_skbs(sq, false); 2389 2390 } while (use_napi && kick && 2391 unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 2392 2393 /* timestamp packet in software */ 2394 skb_tx_timestamp(skb); 2395 2396 /* Try to transmit */ 2397 err = xmit_skb(sq, skb); 2398 2399 /* This should not happen! */ 2400 if (unlikely(err)) { 2401 DEV_STATS_INC(dev, tx_fifo_errors); 2402 if (net_ratelimit()) 2403 dev_warn(&dev->dev, 2404 "Unexpected TXQ (%d) queue failure: %d\n", 2405 qnum, err); 2406 DEV_STATS_INC(dev, tx_dropped); 2407 dev_kfree_skb_any(skb); 2408 return NETDEV_TX_OK; 2409 } 2410 2411 /* Don't wait up for transmitted skbs to be freed. */ 2412 if (!use_napi) { 2413 skb_orphan(skb); 2414 nf_reset_ct(skb); 2415 } 2416 2417 check_sq_full_and_disable(vi, dev, sq); 2418 2419 if (kick || netif_xmit_stopped(txq)) { 2420 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 2421 u64_stats_update_begin(&sq->stats.syncp); 2422 u64_stats_inc(&sq->stats.kicks); 2423 u64_stats_update_end(&sq->stats.syncp); 2424 } 2425 } 2426 2427 return NETDEV_TX_OK; 2428 } 2429 2430 static int virtnet_rx_resize(struct virtnet_info *vi, 2431 struct receive_queue *rq, u32 ring_num) 2432 { 2433 bool running = netif_running(vi->dev); 2434 int err, qindex; 2435 2436 qindex = rq - vi->rq; 2437 2438 if (running) { 2439 napi_disable(&rq->napi); 2440 cancel_work_sync(&rq->dim.work); 2441 } 2442 2443 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); 2444 if (err) 2445 netdev_err(vi->dev, "resize rx failed: rx queue index: %d err: %d\n", qindex, err); 2446 2447 if (!try_fill_recv(vi, rq, GFP_KERNEL)) 2448 schedule_delayed_work(&vi->refill, 0); 2449 2450 if (running) 2451 virtnet_napi_enable(rq->vq, &rq->napi); 2452 return err; 2453 } 2454 2455 static int virtnet_tx_resize(struct virtnet_info *vi, 2456 struct send_queue *sq, u32 ring_num) 2457 { 2458 bool running = netif_running(vi->dev); 2459 struct netdev_queue *txq; 2460 int err, qindex; 2461 2462 qindex = sq - vi->sq; 2463 2464 if (running) 2465 virtnet_napi_tx_disable(&sq->napi); 2466 2467 txq = netdev_get_tx_queue(vi->dev, qindex); 2468 2469 /* 1. wait for all xmit to complete 2470 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue() 2471 */ 2472 __netif_tx_lock_bh(txq); 2473 2474 /* Prevent rx poll from accessing sq. */ 2475 sq->reset = true; 2476 2477 /* Prevent the upper layer from trying to send packets.
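 * (paired with the tx lock held here, this keeps start_xmit() from
 * racing with the virtqueue_resize() below).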
*/ 2478 netif_stop_subqueue(vi->dev, qindex); 2479 2480 __netif_tx_unlock_bh(txq); 2481 2482 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); 2483 if (err) 2484 netdev_err(vi->dev, "resize tx failed: tx queue index: %d err: %d\n", qindex, err); 2485 2486 __netif_tx_lock_bh(txq); 2487 sq->reset = false; 2488 netif_tx_wake_queue(txq); 2489 __netif_tx_unlock_bh(txq); 2490 2491 if (running) 2492 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); 2493 return err; 2494 } 2495 2496 /* 2497 * Send command via the control virtqueue and check status. Commands 2498 * supported by the hypervisor, as indicated by feature bits, should 2499 * never fail unless improperly formatted. 2500 */ 2501 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 2502 struct scatterlist *out) 2503 { 2504 struct scatterlist *sgs[4], hdr, stat; 2505 unsigned out_num = 0, tmp; 2506 int ret; 2507 2508 /* Caller should know better */ 2509 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 2510 2511 vi->ctrl->status = ~0; 2512 vi->ctrl->hdr.class = class; 2513 vi->ctrl->hdr.cmd = cmd; 2514 /* Add header */ 2515 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); 2516 sgs[out_num++] = &hdr; 2517 2518 if (out) 2519 sgs[out_num++] = out; 2520 2521 /* Add return status. */ 2522 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); 2523 sgs[out_num] = &stat; 2524 2525 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 2526 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 2527 if (ret < 0) { 2528 dev_warn(&vi->vdev->dev, 2529 "Failed to add sgs for command vq: %d\n", ret); 2530 return false; 2531 } 2532 2533 if (unlikely(!virtqueue_kick(vi->cvq))) 2534 return vi->ctrl->status == VIRTIO_NET_OK; 2535 2536 /* Spin for a response; the kick causes an ioport write, trapping 2537 * into the hypervisor, so the request should be handled immediately. 2538 */ 2539 while (!virtqueue_get_buf(vi->cvq, &tmp) && 2540 !virtqueue_is_broken(vi->cvq)) 2541 cpu_relax(); 2542 2543 return vi->ctrl->status == VIRTIO_NET_OK; 2544 } 2545 2546 static int virtnet_set_mac_address(struct net_device *dev, void *p) 2547 { 2548 struct virtnet_info *vi = netdev_priv(dev); 2549 struct virtio_device *vdev = vi->vdev; 2550 int ret; 2551 struct sockaddr *addr; 2552 struct scatterlist sg; 2553 2554 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 2555 return -EOPNOTSUPP; 2556 2557 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 2558 if (!addr) 2559 return -ENOMEM; 2560 2561 ret = eth_prepare_mac_addr_change(dev, addr); 2562 if (ret) 2563 goto out; 2564 2565 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 2566 sg_init_one(&sg, addr->sa_data, dev->addr_len); 2567 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 2568 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 2569 dev_warn(&vdev->dev, 2570 "Failed to set mac address by vq command.\n"); 2571 ret = -EINVAL; 2572 goto out; 2573 } 2574 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 2575 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 2576 unsigned int i; 2577 2578 /* Naturally, this has an atomicity problem.
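 * (a legacy device without VIRTIO_NET_F_CTRL_MAC_ADDR may observe a
 * half-updated MAC while the bytes are cwritten one at a time).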
*/ 2579 for (i = 0; i < dev->addr_len; i++) 2580 virtio_cwrite8(vdev, 2581 offsetof(struct virtio_net_config, mac) + 2582 i, addr->sa_data[i]); 2583 } 2584 2585 eth_commit_mac_addr_change(dev, p); 2586 ret = 0; 2587 2588 out: 2589 kfree(addr); 2590 return ret; 2591 } 2592 2593 static void virtnet_stats(struct net_device *dev, 2594 struct rtnl_link_stats64 *tot) 2595 { 2596 struct virtnet_info *vi = netdev_priv(dev); 2597 unsigned int start; 2598 int i; 2599 2600 for (i = 0; i < vi->max_queue_pairs; i++) { 2601 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; 2602 struct receive_queue *rq = &vi->rq[i]; 2603 struct send_queue *sq = &vi->sq[i]; 2604 2605 do { 2606 start = u64_stats_fetch_begin(&sq->stats.syncp); 2607 tpackets = u64_stats_read(&sq->stats.packets); 2608 tbytes = u64_stats_read(&sq->stats.bytes); 2609 terrors = u64_stats_read(&sq->stats.tx_timeouts); 2610 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 2611 2612 do { 2613 start = u64_stats_fetch_begin(&rq->stats.syncp); 2614 rpackets = u64_stats_read(&rq->stats.packets); 2615 rbytes = u64_stats_read(&rq->stats.bytes); 2616 rdrops = u64_stats_read(&rq->stats.drops); 2617 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); 2618 2619 tot->rx_packets += rpackets; 2620 tot->tx_packets += tpackets; 2621 tot->rx_bytes += rbytes; 2622 tot->tx_bytes += tbytes; 2623 tot->rx_dropped += rdrops; 2624 tot->tx_errors += terrors; 2625 } 2626 2627 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); 2628 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); 2629 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); 2630 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); 2631 } 2632 2633 static void virtnet_ack_link_announce(struct virtnet_info *vi) 2634 { 2635 rtnl_lock(); 2636 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, 2637 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) 2638 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); 2639 rtnl_unlock(); 2640 } 2641 2642 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 2643 { 2644 struct scatterlist sg; 2645 struct net_device *dev = vi->dev; 2646 2647 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 2648 return 0; 2649 2650 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 2651 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); 2652 2653 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 2654 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 2655 dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", 2656 queue_pairs); 2657 return -EINVAL; 2658 } else { 2659 vi->curr_queue_pairs = queue_pairs; 2660 /* virtnet_open() will refill when the device goes up. */ 2661 if (dev->flags & IFF_UP) 2662 schedule_delayed_work(&vi->refill, 0); 2663 } 2664 2665 return 0; 2666 } 2667 2668 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 2669 { 2670 int err; 2671 2672 rtnl_lock(); 2673 err = _virtnet_set_queues(vi, queue_pairs); 2674 rtnl_unlock(); 2675 return err; 2676 } 2677 2678 static int virtnet_close(struct net_device *dev) 2679 { 2680 struct virtnet_info *vi = netdev_priv(dev); 2681 int i; 2682 2683 /* Make sure NAPI doesn't schedule refill work */ 2684 disable_delayed_refill(vi); 2685 /* Make sure refill_work doesn't re-enable napi!
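 * (refill_work calls virtnet_napi_enable(), which must not race with
 * the virtnet_disable_queue_pair() loop below).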
*/ 2686 cancel_delayed_work_sync(&vi->refill); 2687 2688 for (i = 0; i < vi->max_queue_pairs; i++) { 2689 virtnet_disable_queue_pair(vi, i); 2690 cancel_work_sync(&vi->rq[i].dim.work); 2691 } 2692 2693 return 0; 2694 } 2695 2696 static void virtnet_set_rx_mode(struct net_device *dev) 2697 { 2698 struct virtnet_info *vi = netdev_priv(dev); 2699 struct scatterlist sg[2]; 2700 struct virtio_net_ctrl_mac *mac_data; 2701 struct netdev_hw_addr *ha; 2702 int uc_count; 2703 int mc_count; 2704 void *buf; 2705 int i; 2706 2707 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 2708 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 2709 return; 2710 2711 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); 2712 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 2713 2714 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); 2715 2716 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2717 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 2718 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 2719 vi->ctrl->promisc ? "en" : "dis"); 2720 2721 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); 2722 2723 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 2724 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 2725 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 2726 vi->ctrl->allmulti ? "en" : "dis"); 2727 2728 uc_count = netdev_uc_count(dev); 2729 mc_count = netdev_mc_count(dev); 2730 /* MAC filter - use one buffer for both lists */ 2731 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 2732 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 2733 mac_data = buf; 2734 if (!buf) 2735 return; 2736 2737 sg_init_table(sg, 2); 2738 2739 /* Store the unicast list and count in the front of the buffer */ 2740 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 2741 i = 0; 2742 netdev_for_each_uc_addr(ha, dev) 2743 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2744 2745 sg_set_buf(&sg[0], mac_data, 2746 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 2747 2748 /* multicast list and count fill the end */ 2749 mac_data = (void *)&mac_data->macs[uc_count][0]; 2750 2751 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 2752 i = 0; 2753 netdev_for_each_mc_addr(ha, dev) 2754 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 2755 2756 sg_set_buf(&sg[1], mac_data, 2757 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 2758 2759 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 2760 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 2761 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 2762 2763 kfree(buf); 2764 } 2765 2766 static int virtnet_vlan_rx_add_vid(struct net_device *dev, 2767 __be16 proto, u16 vid) 2768 { 2769 struct virtnet_info *vi = netdev_priv(dev); 2770 struct scatterlist sg; 2771 2772 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 2773 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 2774 2775 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2776 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 2777 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 2778 return 0; 2779 } 2780 2781 static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 2782 __be16 proto, u16 vid) 2783 { 2784 struct virtnet_info *vi = netdev_priv(dev); 2785 struct scatterlist sg; 2786 2787 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 2788 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 2789 2790 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 2791 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 2792 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 2793 return 0; 2794 } 
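/* Queue/CPU affinity below spreads queue pairs evenly over the online
 * CPUs. Worked example (illustrative only): with 8 online CPUs and 3
 * queue pairs, stride = max(8 / 3, 1) = 2 and stragglers = 8 % 3 = 2,
 * so the first two queues take 3 CPUs each ({0,1,2}, {3,4,5}) and the
 * last takes 2 ({6,7}); the same mask is also used for XPS.
 */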
2795 2796 static void virtnet_clean_affinity(struct virtnet_info *vi) 2797 { 2798 int i; 2799 2800 if (vi->affinity_hint_set) { 2801 for (i = 0; i < vi->max_queue_pairs; i++) { 2802 virtqueue_set_affinity(vi->rq[i].vq, NULL); 2803 virtqueue_set_affinity(vi->sq[i].vq, NULL); 2804 } 2805 2806 vi->affinity_hint_set = false; 2807 } 2808 } 2809 2810 static void virtnet_set_affinity(struct virtnet_info *vi) 2811 { 2812 cpumask_var_t mask; 2813 int stragglers; 2814 int group_size; 2815 int i, j, cpu; 2816 int num_cpu; 2817 int stride; 2818 2819 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 2820 virtnet_clean_affinity(vi); 2821 return; 2822 } 2823 2824 num_cpu = num_online_cpus(); 2825 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); 2826 stragglers = num_cpu >= vi->curr_queue_pairs ? 2827 num_cpu % vi->curr_queue_pairs : 2828 0; 2829 cpu = cpumask_first(cpu_online_mask); 2830 2831 for (i = 0; i < vi->curr_queue_pairs; i++) { 2832 group_size = stride + (i < stragglers ? 1 : 0); 2833 2834 for (j = 0; j < group_size; j++) { 2835 cpumask_set_cpu(cpu, mask); 2836 cpu = cpumask_next_wrap(cpu, cpu_online_mask, 2837 nr_cpu_ids, false); 2838 } 2839 virtqueue_set_affinity(vi->rq[i].vq, mask); 2840 virtqueue_set_affinity(vi->sq[i].vq, mask); 2841 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); 2842 cpumask_clear(mask); 2843 } 2844 2845 vi->affinity_hint_set = true; 2846 free_cpumask_var(mask); 2847 } 2848 2849 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) 2850 { 2851 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2852 node); 2853 virtnet_set_affinity(vi); 2854 return 0; 2855 } 2856 2857 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) 2858 { 2859 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2860 node_dead); 2861 virtnet_set_affinity(vi); 2862 return 0; 2863 } 2864 2865 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 2866 { 2867 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 2868 node); 2869 2870 virtnet_clean_affinity(vi); 2871 return 0; 2872 } 2873 2874 static enum cpuhp_state virtionet_online; 2875 2876 static int virtnet_cpu_notif_add(struct virtnet_info *vi) 2877 { 2878 int ret; 2879 2880 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 2881 if (ret) 2882 return ret; 2883 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 2884 &vi->node_dead); 2885 if (!ret) 2886 return ret; 2887 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 2888 return ret; 2889 } 2890 2891 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 2892 { 2893 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 2894 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 2895 &vi->node_dead); 2896 } 2897 2898 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, 2899 u16 vqn, u32 max_usecs, u32 max_packets) 2900 { 2901 struct scatterlist sgs; 2902 2903 vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); 2904 vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); 2905 vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); 2906 sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); 2907 2908 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 2909 VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, 2910 &sgs)) 2911 return -EINVAL; 2912 2913 return 0; 2914 } 2915 2916 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi, 2917 u16 queue, u32 max_usecs, 2918 u32 max_packets) 2919 
{ 2920 int err; 2921 2922 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), 2923 max_usecs, max_packets); 2924 if (err) 2925 return err; 2926 2927 vi->rq[queue].intr_coal.max_usecs = max_usecs; 2928 vi->rq[queue].intr_coal.max_packets = max_packets; 2929 2930 return 0; 2931 } 2932 2933 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi, 2934 u16 queue, u32 max_usecs, 2935 u32 max_packets) 2936 { 2937 int err; 2938 2939 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), 2940 max_usecs, max_packets); 2941 if (err) 2942 return err; 2943 2944 vi->sq[queue].intr_coal.max_usecs = max_usecs; 2945 vi->sq[queue].intr_coal.max_packets = max_packets; 2946 2947 return 0; 2948 } 2949 2950 static void virtnet_get_ringparam(struct net_device *dev, 2951 struct ethtool_ringparam *ring, 2952 struct kernel_ethtool_ringparam *kernel_ring, 2953 struct netlink_ext_ack *extack) 2954 { 2955 struct virtnet_info *vi = netdev_priv(dev); 2956 2957 ring->rx_max_pending = vi->rq[0].vq->num_max; 2958 ring->tx_max_pending = vi->sq[0].vq->num_max; 2959 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2960 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 2961 } 2962 2963 static int virtnet_set_ringparam(struct net_device *dev, 2964 struct ethtool_ringparam *ring, 2965 struct kernel_ethtool_ringparam *kernel_ring, 2966 struct netlink_ext_ack *extack) 2967 { 2968 struct virtnet_info *vi = netdev_priv(dev); 2969 u32 rx_pending, tx_pending; 2970 struct receive_queue *rq; 2971 struct send_queue *sq; 2972 int i, err; 2973 2974 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 2975 return -EINVAL; 2976 2977 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2978 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 2979 2980 if (ring->rx_pending == rx_pending && 2981 ring->tx_pending == tx_pending) 2982 return 0; 2983 2984 if (ring->rx_pending > vi->rq[0].vq->num_max) 2985 return -EINVAL; 2986 2987 if (ring->tx_pending > vi->sq[0].vq->num_max) 2988 return -EINVAL; 2989 2990 for (i = 0; i < vi->max_queue_pairs; i++) { 2991 rq = vi->rq + i; 2992 sq = vi->sq + i; 2993 2994 if (ring->tx_pending != tx_pending) { 2995 err = virtnet_tx_resize(vi, sq, ring->tx_pending); 2996 if (err) 2997 return err; 2998 2999 /* Upon disabling and re-enabling a transmit virtqueue, the device must 3000 * set the coalescing parameters of the virtqueue to those configured 3001 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver 3002 * did not set any TX coalescing parameters, to 0. 
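 * (hence the resend below: it replays the last global TX coalescing
 * values so the device state matches what the driver has cached).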
3003 */ 3004 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, 3005 vi->intr_coal_tx.max_usecs, 3006 vi->intr_coal_tx.max_packets); 3007 if (err) 3008 return err; 3009 } 3010 3011 if (ring->rx_pending != rx_pending) { 3012 err = virtnet_rx_resize(vi, rq, ring->rx_pending); 3013 if (err) 3014 return err; 3015 3016 /* The reason is same as the transmit virtqueue reset */ 3017 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, 3018 vi->intr_coal_rx.max_usecs, 3019 vi->intr_coal_rx.max_packets); 3020 if (err) 3021 return err; 3022 } 3023 } 3024 3025 return 0; 3026 } 3027 3028 static bool virtnet_commit_rss_command(struct virtnet_info *vi) 3029 { 3030 struct net_device *dev = vi->dev; 3031 struct scatterlist sgs[4]; 3032 unsigned int sg_buf_size; 3033 3034 /* prepare sgs */ 3035 sg_init_table(sgs, 4); 3036 3037 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); 3038 sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); 3039 3040 sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); 3041 sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); 3042 3043 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) 3044 - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); 3045 sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); 3046 3047 sg_buf_size = vi->rss_key_size; 3048 sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); 3049 3050 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 3051 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG 3052 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) { 3053 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); 3054 return false; 3055 } 3056 return true; 3057 } 3058 3059 static void virtnet_init_default_rss(struct virtnet_info *vi) 3060 { 3061 u32 indir_val = 0; 3062 int i = 0; 3063 3064 vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; 3065 vi->rss_hash_types_saved = vi->rss_hash_types_supported; 3066 vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size 3067 ? vi->rss_indir_table_size - 1 : 0; 3068 vi->ctrl->rss.unclassified_queue = 0; 3069 3070 for (; i < vi->rss_indir_table_size; ++i) { 3071 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); 3072 vi->ctrl->rss.indirection_table[i] = indir_val; 3073 } 3074 3075 vi->ctrl->rss.max_tx_vq = vi->has_rss ? 
vi->curr_queue_pairs : 0; 3076 vi->ctrl->rss.hash_key_length = vi->rss_key_size; 3077 3078 netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); 3079 } 3080 3081 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) 3082 { 3083 info->data = 0; 3084 switch (info->flow_type) { 3085 case TCP_V4_FLOW: 3086 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { 3087 info->data = RXH_IP_SRC | RXH_IP_DST | 3088 RXH_L4_B_0_1 | RXH_L4_B_2_3; 3089 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 3090 info->data = RXH_IP_SRC | RXH_IP_DST; 3091 } 3092 break; 3093 case TCP_V6_FLOW: 3094 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { 3095 info->data = RXH_IP_SRC | RXH_IP_DST | 3096 RXH_L4_B_0_1 | RXH_L4_B_2_3; 3097 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 3098 info->data = RXH_IP_SRC | RXH_IP_DST; 3099 } 3100 break; 3101 case UDP_V4_FLOW: 3102 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { 3103 info->data = RXH_IP_SRC | RXH_IP_DST | 3104 RXH_L4_B_0_1 | RXH_L4_B_2_3; 3105 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 3106 info->data = RXH_IP_SRC | RXH_IP_DST; 3107 } 3108 break; 3109 case UDP_V6_FLOW: 3110 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { 3111 info->data = RXH_IP_SRC | RXH_IP_DST | 3112 RXH_L4_B_0_1 | RXH_L4_B_2_3; 3113 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 3114 info->data = RXH_IP_SRC | RXH_IP_DST; 3115 } 3116 break; 3117 case IPV4_FLOW: 3118 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) 3119 info->data = RXH_IP_SRC | RXH_IP_DST; 3120 3121 break; 3122 case IPV6_FLOW: 3123 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) 3124 info->data = RXH_IP_SRC | RXH_IP_DST; 3125 3126 break; 3127 default: 3128 info->data = 0; 3129 break; 3130 } 3131 } 3132 3133 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info) 3134 { 3135 u32 new_hashtypes = vi->rss_hash_types_saved; 3136 bool is_disable = info->data & RXH_DISCARD; 3137 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); 3138 3139 /* supports only 'sd', 'sdfn' and 'r' */ 3140 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) 3141 return false; 3142 3143 switch (info->flow_type) { 3144 case TCP_V4_FLOW: 3145 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4); 3146 if (!is_disable) 3147 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 3148 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0); 3149 break; 3150 case UDP_V4_FLOW: 3151 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4); 3152 if (!is_disable) 3153 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 3154 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0); 3155 break; 3156 case IPV4_FLOW: 3157 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4; 3158 if (!is_disable) 3159 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4; 3160 break; 3161 case TCP_V6_FLOW: 3162 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6); 3163 if (!is_disable) 3164 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 3165 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0); 3166 break; 3167 case UDP_V6_FLOW: 3168 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6); 3169 if (!is_disable) 3170 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 3171 | (is_l4 ? 
VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0); 3172 break; 3173 case IPV6_FLOW: 3174 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6; 3175 if (!is_disable) 3176 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6; 3177 break; 3178 default: 3179 /* unsupported flow */ 3180 return false; 3181 } 3182 3183 /* if unsupported hashtype was set */ 3184 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) 3185 return false; 3186 3187 if (new_hashtypes != vi->rss_hash_types_saved) { 3188 vi->rss_hash_types_saved = new_hashtypes; 3189 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; 3190 if (vi->dev->features & NETIF_F_RXHASH) 3191 return virtnet_commit_rss_command(vi); 3192 } 3193 3194 return true; 3195 } 3196 3197 static void virtnet_get_drvinfo(struct net_device *dev, 3198 struct ethtool_drvinfo *info) 3199 { 3200 struct virtnet_info *vi = netdev_priv(dev); 3201 struct virtio_device *vdev = vi->vdev; 3202 3203 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 3204 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 3205 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 3206 3207 } 3208 3209 /* TODO: Eliminate OOO packets during switching */ 3210 static int virtnet_set_channels(struct net_device *dev, 3211 struct ethtool_channels *channels) 3212 { 3213 struct virtnet_info *vi = netdev_priv(dev); 3214 u16 queue_pairs = channels->combined_count; 3215 int err; 3216 3217 /* We don't support separate rx/tx channels. 3218 * We don't allow setting 'other' channels. 3219 */ 3220 if (channels->rx_count || channels->tx_count || channels->other_count) 3221 return -EINVAL; 3222 3223 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 3224 return -EINVAL; 3225 3226 /* For now we don't support modifying channels while XDP is loaded 3227 * also when XDP is loaded all RX queues have XDP programs so we only 3228 * need to check a single RX queue. 
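 * (virtnet_xdp_set() attaches the program to every rq, so rq[0] is
 * representative of all of them).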
3229 */ 3230 if (vi->rq[0].xdp_prog) 3231 return -EINVAL; 3232 3233 cpus_read_lock(); 3234 err = _virtnet_set_queues(vi, queue_pairs); 3235 if (err) { 3236 cpus_read_unlock(); 3237 goto err; 3238 } 3239 virtnet_set_affinity(vi); 3240 cpus_read_unlock(); 3241 3242 netif_set_real_num_tx_queues(dev, queue_pairs); 3243 netif_set_real_num_rx_queues(dev, queue_pairs); 3244 err: 3245 return err; 3246 } 3247 3248 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) 3249 { 3250 struct virtnet_info *vi = netdev_priv(dev); 3251 unsigned int i, j; 3252 u8 *p = data; 3253 3254 switch (stringset) { 3255 case ETH_SS_STATS: 3256 for (i = 0; i < vi->curr_queue_pairs; i++) { 3257 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) 3258 ethtool_sprintf(&p, "rx_queue_%u_%s", i, 3259 virtnet_rq_stats_desc[j].desc); 3260 } 3261 3262 for (i = 0; i < vi->curr_queue_pairs; i++) { 3263 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) 3264 ethtool_sprintf(&p, "tx_queue_%u_%s", i, 3265 virtnet_sq_stats_desc[j].desc); 3266 } 3267 break; 3268 } 3269 } 3270 3271 static int virtnet_get_sset_count(struct net_device *dev, int sset) 3272 { 3273 struct virtnet_info *vi = netdev_priv(dev); 3274 3275 switch (sset) { 3276 case ETH_SS_STATS: 3277 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + 3278 VIRTNET_SQ_STATS_LEN); 3279 default: 3280 return -EOPNOTSUPP; 3281 } 3282 } 3283 3284 static void virtnet_get_ethtool_stats(struct net_device *dev, 3285 struct ethtool_stats *stats, u64 *data) 3286 { 3287 struct virtnet_info *vi = netdev_priv(dev); 3288 unsigned int idx = 0, start, i, j; 3289 const u8 *stats_base; 3290 const u64_stats_t *p; 3291 size_t offset; 3292 3293 for (i = 0; i < vi->curr_queue_pairs; i++) { 3294 struct receive_queue *rq = &vi->rq[i]; 3295 3296 stats_base = (const u8 *)&rq->stats; 3297 do { 3298 start = u64_stats_fetch_begin(&rq->stats.syncp); 3299 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { 3300 offset = virtnet_rq_stats_desc[j].offset; 3301 p = (const u64_stats_t *)(stats_base + offset); 3302 data[idx + j] = u64_stats_read(p); 3303 } 3304 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); 3305 idx += VIRTNET_RQ_STATS_LEN; 3306 } 3307 3308 for (i = 0; i < vi->curr_queue_pairs; i++) { 3309 struct send_queue *sq = &vi->sq[i]; 3310 3311 stats_base = (const u8 *)&sq->stats; 3312 do { 3313 start = u64_stats_fetch_begin(&sq->stats.syncp); 3314 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { 3315 offset = virtnet_sq_stats_desc[j].offset; 3316 p = (const u64_stats_t *)(stats_base + offset); 3317 data[idx + j] = u64_stats_read(p); 3318 } 3319 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 3320 idx += VIRTNET_SQ_STATS_LEN; 3321 } 3322 } 3323 3324 static void virtnet_get_channels(struct net_device *dev, 3325 struct ethtool_channels *channels) 3326 { 3327 struct virtnet_info *vi = netdev_priv(dev); 3328 3329 channels->combined_count = vi->curr_queue_pairs; 3330 channels->max_combined = vi->max_queue_pairs; 3331 channels->max_other = 0; 3332 channels->rx_count = 0; 3333 channels->tx_count = 0; 3334 channels->other_count = 0; 3335 } 3336 3337 static int virtnet_set_link_ksettings(struct net_device *dev, 3338 const struct ethtool_link_ksettings *cmd) 3339 { 3340 struct virtnet_info *vi = netdev_priv(dev); 3341 3342 return ethtool_virtdev_set_link_ksettings(dev, cmd, 3343 &vi->speed, &vi->duplex); 3344 } 3345 3346 static int virtnet_get_link_ksettings(struct net_device *dev, 3347 struct ethtool_link_ksettings *cmd) 3348 { 3349 struct virtnet_info *vi = netdev_priv(dev); 3350 3351 
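	/* vi->speed and vi->duplex hold either the values advertised via
	 * VIRTIO_NET_F_SPEED_DUPLEX (see virtnet_update_settings()) or
	 * SPEED_UNKNOWN/DUPLEX_UNKNOWN. */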
cmd->base.speed = vi->speed; 3352 cmd->base.duplex = vi->duplex; 3353 cmd->base.port = PORT_OTHER; 3354 3355 return 0; 3356 } 3357 3358 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi, 3359 struct ethtool_coalesce *ec) 3360 { 3361 struct scatterlist sgs_tx; 3362 int i; 3363 3364 vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); 3365 vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); 3366 sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); 3367 3368 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 3369 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, 3370 &sgs_tx)) 3371 return -EINVAL; 3372 3373 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; 3374 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; 3375 for (i = 0; i < vi->max_queue_pairs; i++) { 3376 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; 3377 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; 3378 } 3379 3380 return 0; 3381 } 3382 3383 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, 3384 struct ethtool_coalesce *ec) 3385 { 3386 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; 3387 struct scatterlist sgs_rx; 3388 int i; 3389 3390 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 3391 return -EOPNOTSUPP; 3392 3393 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || 3394 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) 3395 return -EINVAL; 3396 3397 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { 3398 vi->rx_dim_enabled = true; 3399 for (i = 0; i < vi->max_queue_pairs; i++) 3400 vi->rq[i].dim_enabled = true; 3401 return 0; 3402 } 3403 3404 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { 3405 vi->rx_dim_enabled = false; 3406 for (i = 0; i < vi->max_queue_pairs; i++) 3407 vi->rq[i].dim_enabled = false; 3408 } 3409 3410 /* Since the per-queue coalescing params can be set, 3411 * we need to apply the new global params even if they 3412 * are not updated.
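 * (per-queue VQ commands may have diverged from the global values; the
 * global VIRTIO_NET_CTRL_NOTF_COAL_RX_SET below re-synchronizes every
 * rq's cached intr_coal state as well).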
3413 */ 3414 vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); 3415 vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); 3416 sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); 3417 3418 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 3419 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, 3420 &sgs_rx)) 3421 return -EINVAL; 3422 3423 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; 3424 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; 3425 for (i = 0; i < vi->max_queue_pairs; i++) { 3426 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; 3427 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; 3428 } 3429 3430 return 0; 3431 } 3432 3433 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, 3434 struct ethtool_coalesce *ec) 3435 { 3436 int err; 3437 3438 err = virtnet_send_tx_notf_coal_cmds(vi, ec); 3439 if (err) 3440 return err; 3441 3442 err = virtnet_send_rx_notf_coal_cmds(vi, ec); 3443 if (err) 3444 return err; 3445 3446 return 0; 3447 } 3448 3449 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, 3450 struct ethtool_coalesce *ec, 3451 u16 queue) 3452 { 3453 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; 3454 bool cur_rx_dim = vi->rq[queue].dim_enabled; 3455 u32 max_usecs, max_packets; 3456 int err; 3457 3458 max_usecs = vi->rq[queue].intr_coal.max_usecs; 3459 max_packets = vi->rq[queue].intr_coal.max_packets; 3460 3461 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || 3462 ec->rx_max_coalesced_frames != max_packets)) 3463 return -EINVAL; 3464 3465 if (rx_ctrl_dim_on && !cur_rx_dim) { 3466 vi->rq[queue].dim_enabled = true; 3467 return 0; 3468 } 3469 3470 if (!rx_ctrl_dim_on && cur_rx_dim) 3471 vi->rq[queue].dim_enabled = false; 3472 3473 /* If no params are updated, userspace ethtool will 3474 * reject the modification. 3475 */ 3476 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue, 3477 ec->rx_coalesce_usecs, 3478 ec->rx_max_coalesced_frames); 3479 if (err) 3480 return err; 3481 3482 return 0; 3483 } 3484 3485 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, 3486 struct ethtool_coalesce *ec, 3487 u16 queue) 3488 { 3489 int err; 3490 3491 err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue); 3492 if (err) 3493 return err; 3494 3495 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue, 3496 ec->tx_coalesce_usecs, 3497 ec->tx_max_coalesced_frames); 3498 if (err) 3499 return err; 3500 3501 return 0; 3502 } 3503 3504 static void virtnet_rx_dim_work(struct work_struct *work) 3505 { 3506 struct dim *dim = container_of(work, struct dim, work); 3507 struct receive_queue *rq = container_of(dim, 3508 struct receive_queue, dim); 3509 struct virtnet_info *vi = rq->vq->vdev->priv; 3510 struct net_device *dev = vi->dev; 3511 struct dim_cq_moder update_moder; 3512 int i, qnum, err; 3513 3514 if (!rtnl_trylock()) 3515 return; 3516 3517 /* Each rxq's work is queued by "net_dim()->schedule_work()" 3518 * in response to NAPI traffic changes. Note that dim->profile_ix 3519 * for each rxq is updated prior to the queuing action. 3520 * So we only need to traverse and update profiles for all rxqs 3521 * in the work which is holding rtnl_lock. 
3522 */ 3523 for (i = 0; i < vi->curr_queue_pairs; i++) { 3524 rq = &vi->rq[i]; 3525 dim = &rq->dim; 3526 qnum = rq - vi->rq; 3527 3528 if (!rq->dim_enabled) 3529 continue; 3530 3531 update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 3532 if (update_moder.usec != rq->intr_coal.max_usecs || 3533 update_moder.pkts != rq->intr_coal.max_packets) { 3534 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, 3535 update_moder.usec, 3536 update_moder.pkts); 3537 if (err) 3538 pr_debug("%s: Failed to send dim parameters on rxq%d\n", 3539 dev->name, qnum); 3540 dim->state = DIM_START_MEASURE; 3541 } 3542 } 3543 3544 rtnl_unlock(); 3545 } 3546 3547 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) 3548 { 3549 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL 3550 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated. 3551 */ 3552 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) 3553 return -EOPNOTSUPP; 3554 3555 if (ec->tx_max_coalesced_frames > 1 || 3556 ec->rx_max_coalesced_frames != 1) 3557 return -EINVAL; 3558 3559 return 0; 3560 } 3561 3562 static int virtnet_should_update_vq_weight(int dev_flags, int weight, 3563 int vq_weight, bool *should_update) 3564 { 3565 if (weight ^ vq_weight) { 3566 if (dev_flags & IFF_UP) 3567 return -EBUSY; 3568 *should_update = true; 3569 } 3570 3571 return 0; 3572 } 3573 3574 static int virtnet_set_coalesce(struct net_device *dev, 3575 struct ethtool_coalesce *ec, 3576 struct kernel_ethtool_coalesce *kernel_coal, 3577 struct netlink_ext_ack *extack) 3578 { 3579 struct virtnet_info *vi = netdev_priv(dev); 3580 int ret, queue_number, napi_weight; 3581 bool update_napi = false; 3582 3583 /* Can't change NAPI weight if the link is up */ 3584 napi_weight = ec->tx_max_coalesced_frames ? 
NAPI_POLL_WEIGHT : 0; 3585 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { 3586 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, 3587 vi->sq[queue_number].napi.weight, 3588 &update_napi); 3589 if (ret) 3590 return ret; 3591 3592 if (update_napi) { 3593 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be 3594 * updated for the sake of simplicity, which might not be necessary 3595 */ 3596 break; 3597 } 3598 } 3599 3600 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) 3601 ret = virtnet_send_notf_coal_cmds(vi, ec); 3602 else 3603 ret = virtnet_coal_params_supported(ec); 3604 3605 if (ret) 3606 return ret; 3607 3608 if (update_napi) { 3609 for (; queue_number < vi->max_queue_pairs; queue_number++) 3610 vi->sq[queue_number].napi.weight = napi_weight; 3611 } 3612 3613 return ret; 3614 } 3615 3616 static int virtnet_get_coalesce(struct net_device *dev, 3617 struct ethtool_coalesce *ec, 3618 struct kernel_ethtool_coalesce *kernel_coal, 3619 struct netlink_ext_ack *extack) 3620 { 3621 struct virtnet_info *vi = netdev_priv(dev); 3622 3623 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { 3624 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; 3625 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; 3626 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; 3627 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; 3628 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; 3629 } else { 3630 ec->rx_max_coalesced_frames = 1; 3631 3632 if (vi->sq[0].napi.weight) 3633 ec->tx_max_coalesced_frames = 1; 3634 } 3635 3636 return 0; 3637 } 3638 3639 static int virtnet_set_per_queue_coalesce(struct net_device *dev, 3640 u32 queue, 3641 struct ethtool_coalesce *ec) 3642 { 3643 struct virtnet_info *vi = netdev_priv(dev); 3644 int ret, napi_weight; 3645 bool update_napi = false; 3646 3647 if (queue >= vi->max_queue_pairs) 3648 return -EINVAL; 3649 3650 /* Can't change NAPI weight if the link is up */ 3651 napi_weight = ec->tx_max_coalesced_frames ? 
NAPI_POLL_WEIGHT : 0; 3652 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, 3653 vi->sq[queue].napi.weight, 3654 &update_napi); 3655 if (ret) 3656 return ret; 3657 3658 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 3659 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue); 3660 else 3661 ret = virtnet_coal_params_supported(ec); 3662 3663 if (ret) 3664 return ret; 3665 3666 if (update_napi) 3667 vi->sq[queue].napi.weight = napi_weight; 3668 3669 return 0; 3670 } 3671 3672 static int virtnet_get_per_queue_coalesce(struct net_device *dev, 3673 u32 queue, 3674 struct ethtool_coalesce *ec) 3675 { 3676 struct virtnet_info *vi = netdev_priv(dev); 3677 3678 if (queue >= vi->max_queue_pairs) 3679 return -EINVAL; 3680 3681 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { 3682 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; 3683 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; 3684 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; 3685 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; 3686 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; 3687 } else { 3688 ec->rx_max_coalesced_frames = 1; 3689 3690 if (vi->sq[queue].napi.weight) 3691 ec->tx_max_coalesced_frames = 1; 3692 } 3693 3694 return 0; 3695 } 3696 3697 static void virtnet_init_settings(struct net_device *dev) 3698 { 3699 struct virtnet_info *vi = netdev_priv(dev); 3700 3701 vi->speed = SPEED_UNKNOWN; 3702 vi->duplex = DUPLEX_UNKNOWN; 3703 } 3704 3705 static void virtnet_update_settings(struct virtnet_info *vi) 3706 { 3707 u32 speed; 3708 u8 duplex; 3709 3710 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) 3711 return; 3712 3713 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); 3714 3715 if (ethtool_validate_speed(speed)) 3716 vi->speed = speed; 3717 3718 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); 3719 3720 if (ethtool_validate_duplex(duplex)) 3721 vi->duplex = duplex; 3722 } 3723 3724 static u32 virtnet_get_rxfh_key_size(struct net_device *dev) 3725 { 3726 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; 3727 } 3728 3729 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev) 3730 { 3731 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; 3732 } 3733 3734 static int virtnet_get_rxfh(struct net_device *dev, 3735 struct ethtool_rxfh_param *rxfh) 3736 { 3737 struct virtnet_info *vi = netdev_priv(dev); 3738 int i; 3739 3740 if (rxfh->indir) { 3741 for (i = 0; i < vi->rss_indir_table_size; ++i) 3742 rxfh->indir[i] = vi->ctrl->rss.indirection_table[i]; 3743 } 3744 3745 if (rxfh->key) 3746 memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size); 3747 3748 rxfh->hfunc = ETH_RSS_HASH_TOP; 3749 3750 return 0; 3751 } 3752 3753 static int virtnet_set_rxfh(struct net_device *dev, 3754 struct ethtool_rxfh_param *rxfh, 3755 struct netlink_ext_ack *extack) 3756 { 3757 struct virtnet_info *vi = netdev_priv(dev); 3758 int i; 3759 3760 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 3761 rxfh->hfunc != ETH_RSS_HASH_TOP) 3762 return -EOPNOTSUPP; 3763 3764 if (rxfh->indir) { 3765 for (i = 0; i < vi->rss_indir_table_size; ++i) 3766 vi->ctrl->rss.indirection_table[i] = rxfh->indir[i]; 3767 } 3768 if (rxfh->key) 3769 memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size); 3770 3771 virtnet_commit_rss_command(vi); 3772 3773 return 0; 3774 } 3775 3776 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) 3777 { 3778 struct 
virtnet_info *vi = netdev_priv(dev); 3779 int rc = 0; 3780 3781 switch (info->cmd) { 3782 case ETHTOOL_GRXRINGS: 3783 info->data = vi->curr_queue_pairs; 3784 break; 3785 case ETHTOOL_GRXFH: 3786 virtnet_get_hashflow(vi, info); 3787 break; 3788 default: 3789 rc = -EOPNOTSUPP; 3790 } 3791 3792 return rc; 3793 } 3794 3795 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) 3796 { 3797 struct virtnet_info *vi = netdev_priv(dev); 3798 int rc = 0; 3799 3800 switch (info->cmd) { 3801 case ETHTOOL_SRXFH: 3802 if (!virtnet_set_hashflow(vi, info)) 3803 rc = -EINVAL; 3804 3805 break; 3806 default: 3807 rc = -EOPNOTSUPP; 3808 } 3809 3810 return rc; 3811 } 3812 3813 static const struct ethtool_ops virtnet_ethtool_ops = { 3814 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 3815 ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 3816 .get_drvinfo = virtnet_get_drvinfo, 3817 .get_link = ethtool_op_get_link, 3818 .get_ringparam = virtnet_get_ringparam, 3819 .set_ringparam = virtnet_set_ringparam, 3820 .get_strings = virtnet_get_strings, 3821 .get_sset_count = virtnet_get_sset_count, 3822 .get_ethtool_stats = virtnet_get_ethtool_stats, 3823 .set_channels = virtnet_set_channels, 3824 .get_channels = virtnet_get_channels, 3825 .get_ts_info = ethtool_op_get_ts_info, 3826 .get_link_ksettings = virtnet_get_link_ksettings, 3827 .set_link_ksettings = virtnet_set_link_ksettings, 3828 .set_coalesce = virtnet_set_coalesce, 3829 .get_coalesce = virtnet_get_coalesce, 3830 .set_per_queue_coalesce = virtnet_set_per_queue_coalesce, 3831 .get_per_queue_coalesce = virtnet_get_per_queue_coalesce, 3832 .get_rxfh_key_size = virtnet_get_rxfh_key_size, 3833 .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, 3834 .get_rxfh = virtnet_get_rxfh, 3835 .set_rxfh = virtnet_set_rxfh, 3836 .get_rxnfc = virtnet_get_rxnfc, 3837 .set_rxnfc = virtnet_set_rxnfc, 3838 }; 3839 3840 static void virtnet_freeze_down(struct virtio_device *vdev) 3841 { 3842 struct virtnet_info *vi = vdev->priv; 3843 3844 /* Make sure no work handler is accessing the device */ 3845 flush_work(&vi->config_work); 3846 3847 netif_tx_lock_bh(vi->dev); 3848 netif_device_detach(vi->dev); 3849 netif_tx_unlock_bh(vi->dev); 3850 if (netif_running(vi->dev)) 3851 virtnet_close(vi->dev); 3852 } 3853 3854 static int init_vqs(struct virtnet_info *vi); 3855 3856 static int virtnet_restore_up(struct virtio_device *vdev) 3857 { 3858 struct virtnet_info *vi = vdev->priv; 3859 int err; 3860 3861 err = init_vqs(vi); 3862 if (err) 3863 return err; 3864 3865 virtio_device_ready(vdev); 3866 3867 enable_delayed_refill(vi); 3868 3869 if (netif_running(vi->dev)) { 3870 err = virtnet_open(vi->dev); 3871 if (err) 3872 return err; 3873 } 3874 3875 netif_tx_lock_bh(vi->dev); 3876 netif_device_attach(vi->dev); 3877 netif_tx_unlock_bh(vi->dev); 3878 return err; 3879 } 3880 3881 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) 3882 { 3883 struct scatterlist sg; 3884 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); 3885 3886 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); 3887 3888 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, 3889 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { 3890 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n"); 3891 return -EINVAL; 3892 } 3893 3894 return 0; 3895 } 3896 3897 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) 3898 { 3899 u64 offloads = 0; 3900 3901 if (!vi->guest_offloads) 3902 return 0; 3903 3904 return
virtnet_set_guest_offloads(vi, offloads); 3905 } 3906 3907 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) 3908 { 3909 u64 offloads = vi->guest_offloads; 3910 3911 if (!vi->guest_offloads) 3912 return 0; 3913 3914 return virtnet_set_guest_offloads(vi, offloads); 3915 } 3916 3917 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, 3918 struct netlink_ext_ack *extack) 3919 { 3920 unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + 3921 sizeof(struct skb_shared_info)); 3922 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; 3923 struct virtnet_info *vi = netdev_priv(dev); 3924 struct bpf_prog *old_prog; 3925 u16 xdp_qp = 0, curr_qp; 3926 int i, err; 3927 3928 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) 3929 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 3930 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 3931 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 3932 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || 3933 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || 3934 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || 3935 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { 3936 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); 3937 return -EOPNOTSUPP; 3938 } 3939 3940 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { 3941 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); 3942 return -EINVAL; 3943 } 3944 3945 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { 3946 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags"); 3947 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); 3948 return -EINVAL; 3949 } 3950 3951 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 3952 if (prog) 3953 xdp_qp = nr_cpu_ids; 3954 3955 /* XDP requires extra queues for XDP_TX */ 3956 if (curr_qp + xdp_qp > vi->max_queue_pairs) { 3957 netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", 3958 curr_qp + xdp_qp, vi->max_queue_pairs); 3959 xdp_qp = 0; 3960 } 3961 3962 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); 3963 if (!prog && !old_prog) 3964 return 0; 3965 3966 if (prog) 3967 bpf_prog_add(prog, vi->max_queue_pairs - 1); 3968 3969 /* Make sure NAPI is not using any XDP TX queues for RX.
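 * (rx and tx NAPI are parked before the program pointers and the queue
 * count change below, then restarted once the swap is complete).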
	/* Make sure NAPI is not using any XDP TX queues for RX. */
	if (netif_running(dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			virtnet_napi_tx_disable(&vi->sq[i].napi);
		}
	}

	if (!prog) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
			if (i == 0)
				virtnet_restore_guest_offloads(vi);
		}
		synchronize_net();
	}

	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
	if (err)
		goto err;
	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
	vi->xdp_queue_pairs = xdp_qp;

	if (prog) {
		vi->xdp_enabled = true;
		for (i = 0; i < vi->max_queue_pairs; i++) {
			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
			if (i == 0 && !old_prog)
				virtnet_clear_guest_offloads(vi);
		}
		if (!old_prog)
			xdp_features_set_redirect_target(dev, true);
	} else {
		xdp_features_clear_redirect_target(dev);
		vi->xdp_enabled = false;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (old_prog)
			bpf_prog_put(old_prog);
		if (netif_running(dev)) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}

	return 0;

err:
	if (!prog) {
		virtnet_clear_guest_offloads(vi);
		for (i = 0; i < vi->max_queue_pairs; i++)
			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
	}

	if (netif_running(dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}
	if (prog)
		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
	return err;
}

static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
				      size_t len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int ret;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	ret = snprintf(buf, len, "sby");
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

static int virtnet_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u64 offloads;
	int err;

	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
		if (vi->xdp_enabled)
			return -EBUSY;

		if (features & NETIF_F_GRO_HW)
			offloads = vi->guest_offloads_capable;
		else
			offloads = vi->guest_offloads_capable &
				   ~GUEST_OFFLOAD_GRO_HW_MASK;

		err = virtnet_set_guest_offloads(vi, offloads);
		if (err)
			return err;
		vi->guest_offloads = offloads;
	}

	if ((dev->features ^ features) & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
		else
			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;

		if (!virtnet_commit_rss_command(vi))
			return -EINVAL;
	}

	return 0;
}
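/* virtnet_tx_timeout() below is purely diagnostic: it bumps the tx_timeouts
 * counter (exported with the other per-queue stats in virtnet_sq_stats_desc,
 * readable via `ethtool -S`) and logs how stale the queue is; recovery is
 * left to the stack and the device.
 */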
static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct virtnet_info *priv = netdev_priv(dev);
	struct send_queue *sq = &priv->sq[txqueue];
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_inc(&sq->stats.tx_timeouts);
	u64_stats_update_end(&sq->stats.syncp);

	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
		   txqueue, sq->name, sq->vq->index, sq->vq->name,
		   jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
	.ndo_bpf             = virtnet_xdp,
	.ndo_xdp_xmit        = virtnet_xdp_xmit,
	.ndo_features_check  = passthru_features_check,
	.ndo_get_phys_port_name = virtnet_get_phys_port_name,
	.ndo_set_features    = virtnet_set_features,
	.ndo_tx_timeout      = virtnet_tx_timeout,
};

static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
				 struct virtio_net_config, status, &v) < 0)
		return;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		virtnet_update_settings(vi);
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		__netif_napi_del(&vi->rq[i].napi);
		__netif_napi_del(&vi->sq[i].napi);
	}

	/* We called __netif_napi_del(); respect an RCU grace period
	 * before freeing vi->rq.
	 */
	synchronize_net();

	kfree(vi->rq);
	kfree(vi->sq);
	kfree(vi->ctrl);
}

static void _free_receive_bufs(struct virtnet_info *vi)
{
	struct bpf_prog *old_prog;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);

		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
		if (old_prog)
			bpf_prog_put(old_prog);
	}
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	rtnl_lock();
	_free_receive_bufs(vi);
	rtnl_unlock();
}

static void free_receive_page_frags(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++)
		if (vi->rq[i].alloc_frag.page) {
			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
			put_page(vi->rq[i].alloc_frag.page);
		}
}
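/* Buffers pending on a send queue are either sk_buffs or xdp_frames. The
 * two are told apart by tagging the low bit of the stored pointer with
 * VIRTIO_XDP_FLAG (defined near the top of this file); a minimal sketch of
 * the presumed tagging helpers behind is_xdp_frame() and ptr_to_xdp() used
 * below:
 *
 *	is_xdp_frame(ptr):  (unsigned long)ptr & VIRTIO_XDP_FLAG
 *	ptr_to_xdp(ptr):    (struct xdp_frame *)((unsigned long)ptr &
 *	                                         ~VIRTIO_XDP_FLAG)
 */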
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
{
	if (!is_xdp_frame(buf))
		dev_kfree_skb(buf);
	else
		xdp_return_frame(ptr_to_xdp(buf));
}

static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
{
	struct virtnet_info *vi = vq->vdev->priv;
	int i = vq2rxq(vq);

	if (vi->mergeable_rx_bufs)
		put_page(virt_to_head_page(buf));
	else if (vi->big_packets)
		give_pages(&vi->rq[i], buf);
	else
		put_page(virt_to_head_page(buf));
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
			virtnet_sq_free_unused_buf(vq, buf);
		cond_resched();
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
			virtnet_rq_free_unused_buf(rq->vq, buf);
		cond_resched();
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

/* How large should a single buffer be so a queue full of these can fit at
 * least one full packet?
 * Logic below assumes the mergeable buffer header is used.
 */
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{
	const unsigned int hdr_len = vi->hdr_len;
	unsigned int rq_size = virtqueue_get_vring_size(vq);
	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);

	return max(max(min_buf_len, hdr_len) - hdr_len,
		   (unsigned int)GOOD_PACKET_LEN);
}
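/* Worked example for mergeable_min_buf_len(), assuming the 12-byte
 * virtio_net_hdr_mrg_rxbuf header and big packets (IP_MAX_MTU == 65535):
 *
 *	ring of 32:  buf_len = 12 + 14 + 4 + 65535 = 65565
 *	             DIV_ROUND_UP(65565, 32) = 2049
 *	             max(2049, 12) - 12 = 2037  ->  min_buf_len = 2037
 *	ring of 256: DIV_ROUND_UP(65565, 256) = 257; 257 - 12 = 245, so
 *	             the GOOD_PACKET_LEN floor (1518) wins instead.
 */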
static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;
	bool *ctx;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;
	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			goto err_ctx;
	} else {
		ctx = NULL;
	}

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
		if (ctx)
			ctx[rxq2vq(i)] = true;
	}

	ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
				  names, ctx, NULL);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	/* On success ret == 0; fall through to free the temporary arrays,
	 * which are only needed while the virtqueues are being set up.
	 */

err_find:
	kfree(ctx);
err_ctx:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}
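/* Given the layout set up in virtnet_find_vqs() (rx0, tx0, rx1, tx1, ...,
 * control last), the index helpers presumably reduce to
 * rxq2vq(i) == 2 * i and txq2vq(i) == 2 * i + 1. For example, with four
 * queue pairs plus a control queue, total_vqs == 9 and vqs[8] is the cvq.
 */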
static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	if (vi->has_cvq) {
		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
		if (!vi->ctrl)
			goto err_ctrl;
	} else {
		vi->ctrl = NULL;
	}
	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
				      napi_weight);
		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
					 virtnet_poll_tx,
					 napi_tx ? napi_weight : 0);

		INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
		vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));

		u64_stats_init(&vi->rq[i].stats.syncp);
		u64_stats_init(&vi->sq[i].stats.syncp);
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	kfree(vi->ctrl);
err_ctrl:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	virtnet_rq_set_premapped(vi);

	cpus_read_lock();
	virtnet_set_affinity(vi);
	cpus_read_unlock();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
					     char *buf)
{
	struct virtnet_info *vi = netdev_priv(queue->dev);
	unsigned int queue_index = get_netdev_rx_queue_index(queue);
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	struct ewma_pkt_len *avg;

	BUG_ON(queue_index >= vi->max_queue_pairs);
	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
	return sprintf(buf, "%u\n",
		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
					     SKB_DATA_ALIGN(headroom + tailroom)));
}

static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
	__ATTR_RO(mergeable_rx_buffer_size);

static struct attribute *virtio_net_mrg_rx_attrs[] = {
	&mergeable_rx_buffer_size_attribute.attr,
	NULL
};

static const struct attribute_group virtio_net_mrg_rx_group = {
	.name = "virtio_net",
	.attrs = virtio_net_mrg_rx_attrs
};
#endif

static bool virtnet_fail_on_feature(struct virtio_device *vdev,
				    unsigned int fbit,
				    const char *fname, const char *dname)
{
	if (!virtio_has_feature(vdev, fbit))
		return false;

	dev_err(&vdev->dev, "device advertises feature %s but not %s",
		fname, dname);

	return true;
}

#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)

static bool virtnet_validate_features(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
			     "VIRTIO_NET_F_CTRL_VQ") ||
	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
			     "VIRTIO_NET_F_CTRL_VQ"))) {
		return false;
	}

	return true;
}

#define MIN_MTU ETH_MIN_MTU
#define MAX_MTU ETH_MAX_MTU
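/* virtnet_validate() below runs before feature negotiation is finalized,
 * which is why a device advertising an MTU under the 68-byte minimum is
 * handled by clearing VIRTIO_NET_F_MTU rather than failing the probe: the
 * driver then simply ignores the device's mtu config field.
 */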
static int virtnet_validate(struct virtio_device *vdev)
{
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (!virtnet_validate_features(vdev))
		return -EINVAL;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		int mtu = virtio_cread16(vdev,
					 offsetof(struct virtio_net_config,
						  mtu));
		if (mtu < MIN_MTU)
			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
	}

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
	    !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
		__virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
	}

	return 0;
}

static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
{
	return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	       virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	       virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
	       virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
	       (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
}

static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
{
	bool guest_gso = virtnet_check_guest_gso(vi);

	/* If the device can receive ANY guest GSO packets, regardless of mtu,
	 * allocate packets of maximum size; otherwise limit them to
	 * mtu-sized packets only.
	 */
	if (mtu > ETH_DATA_LEN || guest_gso) {
		vi->big_packets = true;
		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
	}
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err = -ENOMEM;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu = 0;

	/* Find out whether the host supports a multiqueue/rss virtio_net device */
	max_queue_pairs = 1;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		max_queue_pairs =
		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));

	/* We need at least 2 queues */
	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
			   IFF_TX_SKB_NO_LINEAR;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
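		/* The csum/gso module parameters (declared at the top of
		 * this file) only gate the default state: the offloads stay
		 * in hw_features, so they remain toggleable later through
		 * ethtool -K.
		 */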
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
			dev->hw_features |= NETIF_F_GSO_UDP_L4;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
		dev->features |= NETIF_F_GRO_HW;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
		dev->hw_features |= NETIF_F_GRO_HW;

	dev->vlan_features = dev->features;
	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use. Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		u8 addr[ETH_ALEN];

		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   addr, ETH_ALEN);
		eth_hw_addr_set(dev, addr);
	} else {
		eth_hw_addr_random(dev);
		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
			 dev->dev_addr);
	}

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
	spin_lock_init(&vi->refill_lock);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
		vi->mergeable_rx_bufs = true;
		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
	}

	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
		vi->has_rss_hash_report = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		vi->has_rss = true;

	if (vi->has_rss || vi->has_rss_hash_report) {
		vi->rss_indir_table_size =
			virtio_cread16(vdev, offsetof(struct virtio_net_config,
						      rss_max_indirection_table_length));
		vi->rss_key_size =
			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));

		vi->rss_hash_types_supported =
			virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
		vi->rss_hash_types_supported &=
			~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
			  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
			  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);

		dev->hw_features |= NETIF_F_RXHASH;
	}

	if (vi->has_rss_hash_report)
		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;
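	/* When the device advertises an MTU, it becomes both the default and
	 * the ceiling: dev->max_mtu is lowered to the same value below, so
	 * userspace cannot raise the MTU past what the device reported.
	 */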
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
			dev_err(&vdev->dev,
				"device MTU appears to have changed, it is now %d < %d",
				mtu, dev->min_mtu);
			err = -EINVAL;
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;
	}

	virtnet_set_big_packets(vi, mtu);

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
		vi->intr_coal_rx.max_usecs = 0;
		vi->intr_coal_tx.max_usecs = 0;
		vi->intr_coal_rx.max_packets = 0;

		/* Keep the default values of the coalescing parameters
		 * aligned with the default napi_tx state.
		 */
		if (vi->sq[0].napi.weight)
			vi->intr_coal_tx.max_packets = 1;
		else
			vi->intr_coal_tx.max_packets = 0;
	}

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
		/* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
		for (i = 0; i < vi->max_queue_pairs; i++)
			if (vi->sq[i].napi.weight)
				vi->sq[i].intr_coal.max_packets = 1;
	}

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover)) {
			err = PTR_ERR(vi->failover);
			goto free_vqs;
		}
	}

	if (vi->has_rss || vi->has_rss_hash_report)
		virtnet_init_default_rss(vi);

	/* serialize netdev register + virtio_device_ready() with ndo_open() */
	rtnl_lock();

	err = register_netdevice(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		rtnl_unlock();
		goto free_failover;
	}

	virtio_device_ready(vdev);

	_virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* A random MAC address has been assigned; notify the device.
	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
	 * because many devices work fine without getting MAC explicitly.
	 */
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct scatterlist sg;

		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			pr_debug("virtio_net: setting MAC address failed\n");
			rtnl_unlock();
			err = -EINVAL;
			goto free_unregister_netdev;
		}
	}

	rtnl_unlock();

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}
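	/* Start from a clean "no carrier" state; the branch below then either
	 * defers to the config-change worker (which reads the real link state
	 * from the device) or forces the link up for devices that cannot
	 * report status at all.
	 */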
	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	virtio_reset_device(vdev);
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	virtio_reset_device(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		virtnet_freeze_down(vdev);
		remove_vq_common(vi);
		return err;
	}

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
	VIRTIO_NET_F_VQ_NOTF_COAL, \
	VIRTIO_NET_F_GUEST_HDRLEN

static unsigned int features[] = {
	VIRTNET_FEATURES,
};
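/* The legacy table below additionally carries VIRTIO_NET_F_GSO and
 * VIRTIO_F_ANY_LAYOUT: pre-1.0 transitional devices negotiate these
 * explicitly, while for modern devices VIRTIO_F_VERSION_1 already implies
 * the any-layout behaviour (see the vi->any_header_sg setup in probe).
 */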
static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtnet_validate,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;
	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");