// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/dim.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>
#include <net/netdev_queues.h>
#include <net/xdp_sock_drv.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GUEST_USO4,
	VIRTIO_NET_F_GUEST_USO6,
	VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK	((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
					 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
					 (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
					 (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
					 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
					 (1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
	size_t qstat_offset;
};

struct virtnet_sq_free_stats {
	u64 packets;
	u64 bytes;
	u64 napi_packets;
	u64 napi_bytes;
	u64 xsk;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_tx_drops;
	u64_stats_t kicks;
	u64_stats_t tx_timeouts;
	u64_stats_t stop;
	u64_stats_t wake;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t drops;
	u64_stats_t xdp_packets;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_redirects;
	u64_stats_t xdp_drops;
	u64_stats_t kicks;
};

#define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
#define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
#define VIRTNET_SQ_STAT_QSTAT(name, m)				\
	{							\
		name,						\
		offsetof(struct virtnet_sq_stats, m),		\
		offsetof(struct netdev_queue_stats_tx, m),	\
	}

#define VIRTNET_RQ_STAT_QSTAT(name, m)				\
	{							\
		name,						\
		offsetof(struct virtnet_rq_stats, m),		\
		offsetof(struct netdev_queue_stats_rx, m),	\
	}

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	VIRTNET_SQ_STAT("xdp_tx", xdp_tx),
	VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
	VIRTNET_SQ_STAT("kicks", kicks),
	VIRTNET_SQ_STAT("tx_timeouts", tx_timeouts),
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	VIRTNET_RQ_STAT("drops", drops),
	VIRTNET_RQ_STAT("xdp_packets", xdp_packets),
	VIRTNET_RQ_STAT("xdp_tx", xdp_tx),
	VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects),
	VIRTNET_RQ_STAT("xdp_drops", xdp_drops),
	VIRTNET_RQ_STAT("kicks", kicks),
};

static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
	VIRTNET_SQ_STAT_QSTAT("packets", packets),
	VIRTNET_SQ_STAT_QSTAT("bytes", bytes),
	VIRTNET_SQ_STAT_QSTAT("stop", stop),
	VIRTNET_SQ_STAT_QSTAT("wake", wake),
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
	VIRTNET_RQ_STAT_QSTAT("packets", packets),
	VIRTNET_RQ_STAT_QSTAT("bytes", bytes),
};

#define VIRTNET_STATS_DESC_CQ(name) \
	{#name, offsetof(struct virtio_net_stats_cvq, name), -1}

#define VIRTNET_STATS_DESC_RX(class, name) \
	{#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}

#define VIRTNET_STATS_DESC_TX(class, name) \
	{#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}

static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
	VIRTNET_STATS_DESC_CQ(command_num),
	VIRTNET_STATS_DESC_CQ(ok_num),
};

static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
	VIRTNET_STATS_DESC_RX(basic, packets),
	VIRTNET_STATS_DESC_RX(basic, bytes),

	VIRTNET_STATS_DESC_RX(basic, notifications),
	VIRTNET_STATS_DESC_RX(basic, interrupts),
};

static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
	VIRTNET_STATS_DESC_TX(basic, packets),
	VIRTNET_STATS_DESC_TX(basic, bytes),

	VIRTNET_STATS_DESC_TX(basic, notifications),
	VIRTNET_STATS_DESC_TX(basic, interrupts),
};

static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
	VIRTNET_STATS_DESC_RX(csum, needs_csum),
};

static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
	VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
	VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
};

static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
	VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
};

static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
	VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
};

#define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field)			\
	{									\
		#name,								\
		offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name),	\
		offsetof(struct netdev_queue_stats_rx, qstat_field),		\
	}

#define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field)			\
	{									\
		#name,								\
		offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name),	\
		offsetof(struct netdev_queue_stats_tx, qstat_field),		\
	}
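
/* Illustration (editor's note, not compiled):
 * VIRTNET_STATS_DESC_RX_QSTAT(basic, drops, hw_drops) expands to
 *	{ "drops",
 *	  offsetof(struct virtio_net_stats_rx_basic, rx_drops),
 *	  offsetof(struct netdev_queue_stats_rx, hw_drops) }
 * i.e. one descriptor naming the stat, where to read it from the virtio
 * device-stats reply, and where to store it in the netdev qstats. The
 * plain VIRTNET_*_STAT() variants use a qstat_offset of -1 to mark stats
 * with no qstat counterpart.
 */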

static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = {
	VIRTNET_STATS_DESC_RX_QSTAT(basic, drops, hw_drops),
	VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
};

static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = {
	VIRTNET_STATS_DESC_TX_QSTAT(basic, drops, hw_drops),
	VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
};

static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = {
	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none, csum_none),
	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad, csum_bad),
};

static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = {
	VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none, csum_none),
	VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
};

static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = {
	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets, hw_gro_packets),
	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes, hw_gro_bytes),
	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced, hw_gro_wire_bytes),
};

static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = {
	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets, hw_gso_packets),
	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes, hw_gso_bytes),
	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments, hw_gso_wire_packets),
	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
};

static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = {
	VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
};

static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
	VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
};

#define VIRTNET_Q_TYPE_RX 0
#define VIRTNET_Q_TYPE_TX 1
#define VIRTNET_Q_TYPE_CQ 2

struct virtnet_interrupt_coalesce {
	u32 max_packets;
	u32 max_usecs;
};

/* The DMA information for the pages allocated in one go. */
struct virtnet_rq_dma {
	dma_addr_t addr;
	u32 ref;
	u16 len;
	u16 need_sync;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[16];

	struct virtnet_sq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	struct napi_struct napi;

	/* Record whether sq is in reset state. */
	bool reset;

	struct xsk_buff_pool *xsk_pool;

	dma_addr_t xsk_hdr_dma_addr;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* The number of rx notifications */
	u16 calls;

	/* Is dynamic interrupt moderation enabled? */
	bool dim_enabled;

	/* Used to protect dim_enabled and intr_coal */
	struct mutex dim_lock;

	/* Dynamic Interrupt Moderation */
	struct dim dim;

	u32 packets_in_napi;

	struct virtnet_interrupt_coalesce intr_coal;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[16];

	struct xdp_rxq_info xdp_rxq;

	/* Record the last dma info so it can be freed after a new page is
	 * allocated.
	 */
	struct virtnet_rq_dma *last_dma;

	struct xsk_buff_pool *xsk_pool;

	/* xdp rxq used by xsk */
	struct xdp_rxq_info xsk_rxq_info;

	struct xdp_buff **xsk_buffs;
};

#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0 when XDP is already loaded, so track
	 * whether XDP is enabled separately.
	 */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;
	struct virtio_net_rss_config_hdr *rss_hdr;
	struct virtio_net_rss_config_trailer rss_trailer;
	u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];

	/* Has control virtqueue */
	bool has_cvq;

	/* Lock to protect the control VQ */
	struct mutex cvq_lock;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Work struct for setting rx mode */
	struct work_struct rx_mode_work;

	/* OK to queue work setting RX mode? */
	bool rx_mode_work_enabled;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Is rx dynamic interrupt moderation enabled? */
	bool rx_dim_enabled;

	/* Interrupt coalescing settings */
	struct virtnet_interrupt_coalesce intr_coal_tx;
	struct virtnet_interrupt_coalesce intr_coal_rx;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;

	u64 device_stats_cap;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and the data sg buffer shares a
	 * page with this header sg. This padding makes the next sg 16 byte
	 * aligned after the header.
	 */
	char padding[12];
};

struct virtio_net_common_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
		struct virtio_net_hdr_v1_hash hash_v1_hdr;
	};
};

static struct virtio_net_common_hdr xsk_hdr;

static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
			       unsigned int *xdp_xmit,
			       struct virtnet_rq_stats *stats);
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
				 struct sk_buff *skb, u8 flags);
static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
					       struct sk_buff *curr_skb,
					       struct page *page, void *buf,
					       int len, int truesize);
static void virtnet_xsk_completed(struct send_queue *sq, int num);

enum virtnet_xmit_type {
	VIRTNET_XMIT_TYPE_SKB,
	VIRTNET_XMIT_TYPE_SKB_ORPHAN,
	VIRTNET_XMIT_TYPE_XDP,
	VIRTNET_XMIT_TYPE_XSK,
};

static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
{
	u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1;

	return struct_size(vi->rss_hdr, indirection_table, indir_table_size);
}

static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
{
	return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
}
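
/* Example (editor's illustration): with has_rss set and
 * rss_indir_table_size == 128, virtnet_rss_hdr_size() is
 * sizeof(*vi->rss_hdr) plus room for a 128-entry indirection table;
 * struct_size() computes this with overflow checking rather than
 * open-coded arithmetic.
 */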

/* We use the last two bits of the pointer to distinguish the xmit type. */
#define VIRTNET_XMIT_TYPE_MASK (BIT(0) | BIT(1))

#define VIRTIO_XSK_FLAG_OFFSET 2

static enum virtnet_xmit_type virtnet_xmit_ptr_unpack(void **ptr)
{
	unsigned long p = (unsigned long)*ptr;

	*ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);

	return p & VIRTNET_XMIT_TYPE_MASK;
}

static void *virtnet_xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
{
	return (void *)((unsigned long)ptr | type);
}

static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
			      enum virtnet_xmit_type type)
{
	return virtqueue_add_outbuf(sq->vq, sq->sg, num,
				    virtnet_xmit_ptr_pack(data, type),
				    GFP_ATOMIC);
}

static u32 virtnet_ptr_to_xsk_buff_len(void *ptr)
{
	return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
}

static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
{
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = len;
}

static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
			    bool in_napi, struct virtnet_sq_free_stats *stats)
{
	struct xdp_frame *frame;
	struct sk_buff *skb;
	unsigned int len;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		switch (virtnet_xmit_ptr_unpack(&ptr)) {
		case VIRTNET_XMIT_TYPE_SKB:
			skb = ptr;

			pr_debug("Sent skb %p\n", skb);
			stats->napi_packets++;
			stats->napi_bytes += skb->len;
			napi_consume_skb(skb, in_napi);
			break;

		case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
			skb = ptr;

			stats->packets++;
			stats->bytes += skb->len;
			napi_consume_skb(skb, in_napi);
			break;

		case VIRTNET_XMIT_TYPE_XDP:
			frame = ptr;

			stats->packets++;
			stats->bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
			break;

		case VIRTNET_XMIT_TYPE_XSK:
			stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr);
			stats->xsk++;
			break;
		}
	}
	netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
}

static void virtnet_free_old_xmit(struct send_queue *sq,
				  struct netdev_queue *txq,
				  bool in_napi,
				  struct virtnet_sq_free_stats *stats)
{
	__free_old_xmit(sq, txq, in_napi, stats);

	if (stats->xsk)
		virtnet_xsk_completed(sq, stats->xsk);
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static int vq_type(struct virtnet_info *vi, int qid)
{
	if (qid == vi->max_queue_pairs * 2)
		return VIRTNET_Q_TYPE_CQ;

	if (qid % 2)
		return VIRTNET_Q_TYPE_TX;

	return VIRTNET_Q_TYPE_RX;
}
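
/* Illustration (editor's note): with 2 queue pairs the virtqueues are
 * laid out as
 *	vq0:rx0  vq1:tx0  vq2:rx1  vq3:tx1  vq4:cvq
 * so rxq2vq(1) == 2, txq2vq(1) == 3, and vq2txq() of vq3 yields 1.
 */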

static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_common_hdr *)skb->cb;
}

/*
 * page->private is used to chain pages for big packets; put the most
 * recently used list at the front for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtnet_rq_free_buf(struct virtnet_info *vi,
				struct receive_queue *rq, void *buf)
{
	if (vi->mergeable_rx_bufs)
		put_page(virt_to_head_page(buf));
	else if (vi->big_packets)
		give_pages(rq, buf);
	else
		put_page(virt_to_head_page(buf));
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = true;
	spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = false;
	spin_unlock_bh(&vi->refill_lock);
}

static void enable_rx_mode_work(struct virtnet_info *vi)
{
	rtnl_lock();
	vi->rx_mode_work_enabled = true;
	rtnl_unlock();
}

static void disable_rx_mode_work(struct virtnet_info *vi)
{
	rtnl_lock();
	vi->rx_mode_work_enabled = false;
	rtnl_unlock();
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static bool virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
		else
			return true;
	} else {
		virtqueue_disable_cb(vq);
	}

	return false;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
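
/* Illustration (editor's note): mergeable_len_to_ctx(1536, 64) stores
 * (64 << 22) | 1536 in the context pointer, and the two decoders above
 * recover headroom 64 and truesize 1536. truesize must therefore fit in
 * MRG_CTX_HEADER_SHIFT (22) bits, i.e. stay below 4 MiB.
 */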

static int check_mergeable_len(struct net_device *dev, void *mrg_ctx,
			       unsigned int len)
{
	unsigned int headroom, tailroom, room, truesize;

	truesize = mergeable_ctx_to_truesize(mrg_ctx);
	headroom = mergeable_ctx_to_headroom(mrg_ctx);
	tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	room = SKB_DATA_ALIGN(headroom + tailroom);

	if (len > truesize - room) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)(truesize - room));
		DEV_STATS_INC(dev, rx_length_errors);
		return -1;
	}

	return 0;
}

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(buf, buflen);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = virtnet_build_skb(buf, truesize, p - buf, len);
		if (unlikely(!skb))
			return NULL;

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy the whole frame if it fits in skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

ok:
	hdr = skb_vnet_common_hdr(skb);
	memcpy(hdr, hdr_p, hdr_len);
	if (page_to_free)
		put_page(page_to_free);

	return skb;
}

static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct page *page = virt_to_head_page(buf);
	struct virtnet_rq_dma *dma;
	void *head;
	int offset;

	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);

	head = page_address(page);

	dma = head;

	--dma->ref;

	if (dma->need_sync && len) {
		offset = buf - (head + sizeof(*dma));

		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
							offset, len,
							DMA_FROM_DEVICE);
	}

	if (dma->ref)
		return;

	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	put_page(page);
}

static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;

	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);

	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
	if (buf)
		virtnet_rq_unmap(rq, buf, *len);

	return buf;
}

static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_dma *dma;
	dma_addr_t addr;
	u32 offset;
	void *head;

	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);

	head = page_address(rq->alloc_frag.page);

	offset = buf - head;

	dma = head;

	addr = dma->addr - sizeof(*dma) + offset;

	sg_init_table(rq->sg, 1);
	sg_fill_dma(rq->sg, addr, len);
}

static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_dma *dma;
	void *buf, *head;
	dma_addr_t addr;

	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);

	head = page_address(alloc_frag->page);

	dma = head;

	/* new pages */
	if (!alloc_frag->offset) {
		if (rq->last_dma) {
			/* The new page is allocated, so the old dma will get
			 * no new users; unmap it once its ref drops to 0.
			 */
			virtnet_rq_unmap(rq, rq->last_dma, 0);
			rq->last_dma = NULL;
		}

		dma->len = alloc_frag->size - sizeof(*dma);

		addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
						      dma->len, DMA_FROM_DEVICE, 0);
		if (virtqueue_dma_mapping_error(rq->vq, addr))
			return NULL;

		dma->addr = addr;
		dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);

		/* Hold a reference on the dma to prevent it from being
		 * released during error handling. The reference is dropped
		 * once the pages are no longer used.
		 */
		get_page(alloc_frag->page);
		dma->ref = 1;
		alloc_frag->offset = sizeof(*dma);

		rq->last_dma = dma;
	}

	++dma->ref;

	buf = head + alloc_frag->offset;

	get_page(alloc_frag->page);
	alloc_frag->offset += size;

	return buf;
}

static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct receive_queue *rq;
	int i = vq2rxq(vq);

	rq = &vi->rq[i];

	if (rq->xsk_pool) {
		xsk_buff_free((struct xdp_buff *)buf);
		return;
	}

	if (!vi->big_packets || vi->mergeable_rx_bufs)
		virtnet_rq_unmap(rq, buf, 0);

	virtnet_rq_free_buf(vi, rq, buf);
}

static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
			  bool in_napi)
{
	struct virtnet_sq_free_stats stats = {0};

	virtnet_free_old_xmit(sq, txq, in_napi, &stats);

	/* Avoid the overhead when no packets have been processed; this
	 * happens when we are called speculatively from start_xmit.
	 */
	if (!stats.packets && !stats.napi_packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes);
	u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets);
	u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

static bool tx_may_stop(struct virtnet_info *vi,
			struct net_device *dev,
			struct send_queue *sq)
{
	int qnum;

	qnum = sq - vi->sq;

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < MAX_SKB_FRAGS + 2) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);

		netif_tx_stop_queue(txq);
		u64_stats_update_begin(&sq->stats.syncp);
		u64_stats_inc(&sq->stats.stop);
		u64_stats_update_end(&sq->stats.syncp);

		return true;
	}

	return false;
}
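
/* Illustration (editor's note): with MAX_SKB_FRAGS typically 17, the
 * check above stops the queue once fewer than 19 descriptors remain,
 * enough for a worst-case skb of 17 fragments plus the linear part plus
 * the virtio header.
 */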

static void check_sq_full_and_disable(struct virtnet_info *vi,
				      struct net_device *dev,
				      struct send_queue *sq)
{
	bool use_napi = sq->napi.weight;
	int qnum;

	qnum = sq - vi->sq;

	if (tx_may_stop(vi, dev, sq)) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);

		if (use_napi) {
			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
				virtqueue_napi_schedule(&sq->napi, sq->vq);
		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit(sq, txq, false);
			if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
				netif_start_subqueue(dev, qnum);
				u64_stats_update_begin(&sq->stats.syncp);
				u64_stats_inc(&sq->stats.wake);
				u64_stats_update_end(&sq->stats.syncp);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}

/* Note that @len is the length of received data without virtio header */
static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
				   struct receive_queue *rq, void *buf,
				   u32 len, bool first_buf)
{
	struct xdp_buff *xdp;
	u32 bufsize;

	xdp = (struct xdp_buff *)buf;

	/* In virtnet_add_recvbuf_xsk, we use part of XDP_PACKET_HEADROOM for
	 * the virtio header and ask the vhost to fill data from
	 *         hard_start + XDP_PACKET_HEADROOM - vi->hdr_len
	 * The first buffer holds the virtio header, so the region left for
	 * frame data is
	 *         xsk_pool_get_rx_frame_size()
	 * Buffers other than the first do not carry a virtio header, so their
	 * maximum frame data length is
	 *         xsk_pool_get_rx_frame_size() + vi->hdr_len
	 */
	bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool);
	if (!first_buf)
		bufsize += vi->hdr_len;

	if (unlikely(len > bufsize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %u\n",
			 vi->dev->name, len, bufsize);
		DEV_STATS_INC(vi->dev, rx_length_errors);
		xsk_buff_free(xdp);
		return NULL;
	}

	xsk_buff_set_size(xdp, len);
	xsk_buff_dma_sync_for_cpu(xdp);

	return xdp;
}
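
/* Illustrative XSK RX buffer layout (editor's sketch of the scheme the
 * comment above describes; set up in virtnet_add_recvbuf_xsk):
 *	hard_start
 *	  [XDP_PACKET_HEADROOM - hdr_len][virtio hdr][frame data ...]
 * Only the first buffer of a packet carries the virtio header, which is
 * why follow-up buffers may hold up to hdr_len more bytes of frame data.
 */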

static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
					 struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;
	unsigned int size;

	size = xdp->data_end - xdp->data_hard_start;
	skb = napi_alloc_skb(&rq->napi, size);
	if (unlikely(!skb)) {
		xsk_buff_free(xdp);
		return NULL;
	}

	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);

	size = xdp->data_end - xdp->data_meta;
	memcpy(__skb_put(skb, size), xdp->data_meta, size);

	if (metasize) {
		__skb_pull(skb, metasize);
		skb_metadata_set(skb, metasize);
	}

	xsk_buff_free(xdp);

	return skb;
}

static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
						 struct receive_queue *rq, struct xdp_buff *xdp,
						 unsigned int *xdp_xmit,
						 struct virtnet_rq_stats *stats)
{
	struct bpf_prog *prog;
	u32 ret;

	ret = XDP_PASS;
	rcu_read_lock();
	prog = rcu_dereference(rq->xdp_prog);
	if (prog)
		ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
	rcu_read_unlock();

	switch (ret) {
	case XDP_PASS:
		return xsk_construct_skb(rq, xdp);

	case XDP_TX:
	case XDP_REDIRECT:
		return NULL;

	default:
		/* drop packet */
		xsk_buff_free(xdp);
		u64_stats_inc(&stats->drops);
		return NULL;
	}
}

static void xsk_drop_follow_bufs(struct net_device *dev,
				 struct receive_queue *rq,
				 u32 num_buf,
				 struct virtnet_rq_stats *stats)
{
	struct xdp_buff *xdp;
	u32 len;

	while (num_buf-- > 1) {
		xdp = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!xdp)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			DEV_STATS_INC(dev, rx_length_errors);
			break;
		}
		u64_stats_add(&stats->bytes, len);
		xsk_buff_free(xdp);
	}
}

static int xsk_append_merge_buffer(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct sk_buff *head_skb,
				   u32 num_buf,
				   struct virtio_net_hdr_mrg_rxbuf *hdr,
				   struct virtnet_rq_stats *stats)
{
	struct sk_buff *curr_skb;
	struct xdp_buff *xdp;
	u32 len, truesize;
	struct page *page;
	void *buf;

	curr_skb = head_skb;

	while (--num_buf) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 vi->dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			DEV_STATS_INC(vi->dev, rx_length_errors);
			return -EINVAL;
		}

		u64_stats_add(&stats->bytes, len);

		xdp = buf_to_xdp(vi, rq, buf, len, false);
		if (!xdp)
			goto err;

		buf = napi_alloc_frag(len);
		if (!buf) {
			xsk_buff_free(xdp);
			goto err;
		}

		memcpy(buf, xdp->data - vi->hdr_len, len);

		xsk_buff_free(xdp);

		page = virt_to_page(buf);

		truesize = len;

		curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
						   buf, len, truesize);
		if (!curr_skb) {
			put_page(page);
			goto err;
		}
	}

	return 0;

err:
	xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
	return -EINVAL;
}

static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
						 struct receive_queue *rq, struct xdp_buff *xdp,
						 unsigned int *xdp_xmit,
						 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct bpf_prog *prog;
	struct sk_buff *skb;
	u32 ret, num_buf;

	hdr = xdp->data - vi->hdr_len;
	num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);

	ret = XDP_PASS;
	rcu_read_lock();
	prog = rcu_dereference(rq->xdp_prog);
	/* TODO: support multi buffer. */
	if (prog && num_buf == 1)
		ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
	rcu_read_unlock();

	switch (ret) {
	case XDP_PASS:
		skb = xsk_construct_skb(rq, xdp);
		if (!skb)
			goto drop_bufs;

		if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
			dev_kfree_skb(skb);
			goto drop;
		}

		return skb;

	case XDP_TX:
	case XDP_REDIRECT:
		return NULL;

	default:
		/* drop packet */
		xsk_buff_free(xdp);
	}

drop_bufs:
	xsk_drop_follow_bufs(dev, rq, num_buf, stats);

drop:
	u64_stats_inc(&stats->drops);
	return NULL;
}

static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
				    void *buf, u32 len,
				    unsigned int *xdp_xmit,
				    struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb = NULL;
	struct xdp_buff *xdp;
	u8 flags;

	len -= vi->hdr_len;

	u64_stats_add(&stats->bytes, len);

	xdp = buf_to_xdp(vi, rq, buf, len, true);
	if (!xdp)
		return;

	if (unlikely(len < ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		DEV_STATS_INC(dev, rx_length_errors);
		xsk_buff_free(xdp);
		return;
	}

	flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;

	if (!vi->mergeable_rx_bufs)
		skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
	else
		skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);

	if (skb)
		virtnet_receive_done(vi, rq, skb, flags);
}

static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
				   struct xsk_buff_pool *pool, gfp_t gfp)
{
	struct xdp_buff **xsk_buffs;
	dma_addr_t addr;
	int err = 0;
	u32 len, i;
	int num;

	xsk_buffs = rq->xsk_buffs;

	num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
	if (!num)
		return -ENOMEM;

	len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;

	for (i = 0; i < num; ++i) {
		/* Use part of XDP_PACKET_HEADROOM as the virtnet hdr space.
		 * We assume XDP_PACKET_HEADROOM is larger than vi->hdr_len.
		 * (see function virtnet_xsk_pool_enable)
		 */
		addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;

		sg_init_table(rq->sg, 1);
		sg_fill_dma(rq->sg, addr, len);

		err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
						    xsk_buffs[i], NULL, gfp);
		if (err)
			goto err;
	}

	return num;

err:
	for (; i < num; ++i)
		xsk_buff_free(xsk_buffs[i]);

	return err;
}

static void *virtnet_xsk_to_ptr(u32 len)
{
	unsigned long p;

	p = len << VIRTIO_XSK_FLAG_OFFSET;

	return virtnet_xmit_ptr_pack((void *)p, VIRTNET_XMIT_TYPE_XSK);
}
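
/* Illustration (editor's note): virtnet_xsk_to_ptr(1500) yields
 * (1500 << 2) with the VIRTNET_XMIT_TYPE_XSK tag in the low two bits;
 * on completion, virtnet_xmit_ptr_unpack() strips the tag and
 * virtnet_ptr_to_xsk_buff_len() shifts the length 1500 back out. No
 * pointer is ever dereferenced for XSK completions.
 */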

static int virtnet_xsk_xmit_one(struct send_queue *sq,
				struct xsk_buff_pool *pool,
				struct xdp_desc *desc)
{
	struct virtnet_info *vi;
	dma_addr_t addr;

	vi = sq->vq->vdev->priv;

	addr = xsk_buff_raw_get_dma(pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);

	sg_init_table(sq->sg, 2);
	sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
	sg_fill_dma(sq->sg + 1, addr, desc->len);

	return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2,
					      virtnet_xsk_to_ptr(desc->len),
					      GFP_ATOMIC);
}

static int virtnet_xsk_xmit_batch(struct send_queue *sq,
				  struct xsk_buff_pool *pool,
				  unsigned int budget,
				  u64 *kicks)
{
	struct xdp_desc *descs = pool->tx_descs;
	bool kick = false;
	u32 nb_pkts, i;
	int err;

	budget = min_t(u32, budget, sq->vq->num_free);

	nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
	if (!nb_pkts)
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
		if (unlikely(err)) {
			xsk_tx_completed(sq->xsk_pool, nb_pkts - i);
			break;
		}

		kick = true;
	}

	if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
		(*kicks)++;

	return i;
}

static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
			     int budget)
{
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_sq_free_stats stats = {};
	struct net_device *dev = vi->dev;
	u64 kicks = 0;
	int sent;

	/* Avoid waking up NAPI needlessly, so call __free_old_xmit() instead
	 * of free_old_xmit().
	 */
	__free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats);

	if (stats.xsk)
		xsk_tx_completed(sq->xsk_pool, stats.xsk);

	sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);

	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
		check_sq_full_and_disable(vi, vi->dev, sq);

	if (sent) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(vi->dev, sq - vi->sq);
		txq_trans_cond_update(txq);
	}

	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.packets, stats.packets);
	u64_stats_add(&sq->stats.bytes, stats.bytes);
	u64_stats_add(&sq->stats.kicks, kicks);
	u64_stats_add(&sq->stats.xdp_tx, sent);
	u64_stats_update_end(&sq->stats.syncp);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	return sent;
}

static void xsk_wakeup(struct send_queue *sq)
{
	if (napi_if_scheduled_mark_missed(&sq->napi))
		return;

	local_bh_disable();
	virtqueue_napi_schedule(&sq->napi, sq->vq);
	local_bh_enable();
}

static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct send_queue *sq;

	if (!netif_running(dev))
		return -ENETDOWN;

	if (qid >= vi->curr_queue_pairs)
		return -EINVAL;

	sq = &vi->sq[qid];

	xsk_wakeup(sq);
	return 0;
}

static void virtnet_xsk_completed(struct send_queue *sq, int num)
{
	xsk_tx_completed(sq->xsk_pool, num);

	/* When called from rx poll, start_xmit or XDP xmit, we should wake
	 * the tx napi to consume the xsk tx queue, because the tx interrupt
	 * may not be triggered.
	 */
	xsk_wakeup(sq);
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* The wrapper virtnet_xdp_xmit() frees pending old buffers, and
	 * xdp_get_frame_len() and xdp_return_frame() locate skb_shared_info
	 * from xdpf->data and xdpf->headroom. Therefore, headroom must be
	 * updated here, in sync with the data pointer.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtnet_add_outbuf(sq, nr_frags + 1, xdpf, VIRTNET_XMIT_TYPE_XDP);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. deciding whether to
 * lock/unlock the txq. 3. making sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({					\
	int cpu = smp_processor_id();					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
	unsigned int qp;						\
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {				\
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;		\
		qp += cpu;						\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_acquire(txq);				\
	} else {							\
		qp = cpu % v->curr_queue_pairs;				\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_lock(txq, cpu);				\
	}								\
	v->sq + qp;							\
})

#define virtnet_xdp_put_sq(vi, q) {					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);			\
	if (v->curr_queue_pairs > nr_cpu_ids)				\
		__netif_tx_release(txq);				\
	else								\
		__netif_tx_unlock(txq);					\
}
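
/* Illustrative usage of the pair above (editor's note):
 *	struct send_queue *sq = virtnet_xdp_get_sq(vi);
 *	...queue XDP frames on sq...
 *	virtnet_xdp_put_sq(vi, sq);
 * When queue pairs outnumber CPUs, each CPU owns a dedicated XDP sq and
 * the txq is merely acquired; otherwise sqs are shared across CPUs and
 * the txq must be locked.
 */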

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtnet_sq_free_stats stats = {0};
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	int nxmit = 0;
	int kicks = 0;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
			      false, &stats);

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
			break;
		nxmit++;
	}
	ret = nxmit;

	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
		check_sq_full_and_disable(vi, dev, sq);

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, stats.bytes);
	u64_stats_add(&sq->stats.packets, stats.packets);
	u64_stats_add(&sq->stats.xdp_tx, n);
	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
	u64_stats_add(&sq->stats.kicks, kicks);
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}

static void put_xdp_frags(struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	struct page *xdp_page;
	int i;

	if (xdp_buff_has_frags(xdp)) {
		shinfo = xdp_get_shared_info_from_buff(xdp);
		for (i = 0; i < shinfo->nr_frags; i++) {
			xdp_page = skb_frag_page(&shinfo->frags[i]);
			put_page(xdp_page);
		}
	}
}

static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
			       unsigned int *xdp_xmit,
			       struct virtnet_rq_stats *stats)
{
	struct xdp_frame *xdpf;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	u64_stats_inc(&stats->xdp_packets);

	switch (act) {
	case XDP_PASS:
		return act;

	case XDP_TX:
		u64_stats_inc(&stats->xdp_tx);
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
			return XDP_DROP;
		}

		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
		if (unlikely(!err)) {
			xdp_return_frame_rx_napi(xdpf);
		} else if (unlikely(err < 0)) {
			trace_xdp_exception(dev, xdp_prog, act);
			return XDP_DROP;
		}
		*xdp_xmit |= VIRTIO_XDP_TX;
		return act;

	case XDP_REDIRECT:
		u64_stats_inc(&stats->xdp_redirects);
		err = xdp_do_redirect(dev, xdp, xdp_prog);
		if (err)
			return XDP_DROP;

		*xdp_xmit |= VIRTIO_XDP_REDIR;
		return act;

	default:
		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return XDP_DROP;
	}
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
}
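
/* Editor's summary of the verdict contract used by the callers below:
 * on XDP_TX/XDP_REDIRECT the buffer has already been handed to the
 * transmit/redirect path, so the caller must not free or reuse it and
 * only records *xdp_xmit so the NAPI loop can flush and kick once per
 * poll; on XDP_DROP the caller still owns the buffer and frees it.
 */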
1833 */ 1834 static struct page *xdp_linearize_page(struct net_device *dev, 1835 struct receive_queue *rq, 1836 int *num_buf, 1837 struct page *p, 1838 int offset, 1839 int page_off, 1840 unsigned int *len) 1841 { 1842 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1843 struct page *page; 1844 1845 if (page_off + *len + tailroom > PAGE_SIZE) 1846 return NULL; 1847 1848 page = alloc_page(GFP_ATOMIC); 1849 if (!page) 1850 return NULL; 1851 1852 memcpy(page_address(page) + page_off, page_address(p) + offset, *len); 1853 page_off += *len; 1854 1855 /* Only mergeable mode can go inside this while loop. In small mode, 1856 * *num_buf == 1, so it cannot go inside. 1857 */ 1858 while (--*num_buf) { 1859 unsigned int buflen; 1860 void *buf; 1861 void *ctx; 1862 int off; 1863 1864 buf = virtnet_rq_get_buf(rq, &buflen, &ctx); 1865 if (unlikely(!buf)) 1866 goto err_buf; 1867 1868 p = virt_to_head_page(buf); 1869 off = buf - page_address(p); 1870 1871 if (check_mergeable_len(dev, ctx, buflen)) { 1872 put_page(p); 1873 goto err_buf; 1874 } 1875 1876 /* guard against a misconfigured or uncooperative backend that 1877 * is sending packet larger than the MTU. 1878 */ 1879 if ((page_off + buflen + tailroom) > PAGE_SIZE) { 1880 put_page(p); 1881 goto err_buf; 1882 } 1883 1884 memcpy(page_address(page) + page_off, 1885 page_address(p) + off, buflen); 1886 page_off += buflen; 1887 put_page(p); 1888 } 1889 1890 /* Headroom does not contribute to packet length */ 1891 *len = page_off - XDP_PACKET_HEADROOM; 1892 return page; 1893 err_buf: 1894 __free_pages(page, 0); 1895 return NULL; 1896 } 1897 1898 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi, 1899 unsigned int xdp_headroom, 1900 void *buf, 1901 unsigned int len) 1902 { 1903 unsigned int header_offset; 1904 unsigned int headroom; 1905 unsigned int buflen; 1906 struct sk_buff *skb; 1907 1908 header_offset = VIRTNET_RX_PAD + xdp_headroom; 1909 headroom = vi->hdr_len + header_offset; 1910 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + 1911 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1912 1913 skb = virtnet_build_skb(buf, buflen, headroom, len); 1914 if (unlikely(!skb)) 1915 return NULL; 1916 1917 buf += header_offset; 1918 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); 1919 1920 return skb; 1921 } 1922 1923 static struct sk_buff *receive_small_xdp(struct net_device *dev, 1924 struct virtnet_info *vi, 1925 struct receive_queue *rq, 1926 struct bpf_prog *xdp_prog, 1927 void *buf, 1928 unsigned int xdp_headroom, 1929 unsigned int len, 1930 unsigned int *xdp_xmit, 1931 struct virtnet_rq_stats *stats) 1932 { 1933 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; 1934 unsigned int headroom = vi->hdr_len + header_offset; 1935 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; 1936 struct page *page = virt_to_head_page(buf); 1937 struct page *xdp_page; 1938 unsigned int buflen; 1939 struct xdp_buff xdp; 1940 struct sk_buff *skb; 1941 unsigned int metasize = 0; 1942 u32 act; 1943 1944 if (unlikely(hdr->hdr.gso_type)) 1945 goto err_xdp; 1946 1947 /* Partially checksummed packets must be dropped. 

static struct sk_buff *receive_small_xdp(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 struct bpf_prog *xdp_prog,
					 void *buf,
					 unsigned int xdp_headroom,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
	struct page *page = virt_to_head_page(buf);
	struct page *xdp_page;
	unsigned int buflen;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	unsigned int metasize = 0;
	u32 act;

	if (unlikely(hdr->hdr.gso_type))
		goto err_xdp;

	/* Partially checksummed packets must be dropped. */
	if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
		goto err_xdp;

	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
		int offset = buf - page_address(page) + header_offset;
		unsigned int tlen = len + vi->hdr_len;
		int num_buf = 1;

		xdp_headroom = virtnet_get_headroom(vi);
		header_offset = VIRTNET_RX_PAD + xdp_headroom;
		headroom = vi->hdr_len + header_offset;
		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
					      offset, header_offset,
					      &tlen);
		if (!xdp_page)
			goto err_xdp;

		buf = page_address(xdp_page);
		put_page(page);
		page = xdp_page;
	}

	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
			 xdp_headroom, len, true);

	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		/* Recalculate length in case bpf program changed it */
		len = xdp.data_end - xdp.data;
		metasize = xdp.data - xdp.data_meta;
		break;

	case XDP_TX:
	case XDP_REDIRECT:
		goto xdp_xmit;

	default:
		goto err_xdp;
	}

	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
	if (unlikely(!skb))
		goto err;

	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;

err_xdp:
	u64_stats_inc(&stats->xdp_drops);
err:
	u64_stats_inc(&stats->drops);
	put_page(page);
xdp_xmit:
	return NULL;
}
2028 */ 2029 buf -= VIRTNET_RX_PAD + xdp_headroom; 2030 2031 len -= vi->hdr_len; 2032 u64_stats_add(&stats->bytes, len); 2033 2034 if (unlikely(len > GOOD_PACKET_LEN)) { 2035 pr_debug("%s: rx error: len %u exceeds max size %d\n", 2036 dev->name, len, GOOD_PACKET_LEN); 2037 DEV_STATS_INC(dev, rx_length_errors); 2038 goto err; 2039 } 2040 2041 if (unlikely(vi->xdp_enabled)) { 2042 struct bpf_prog *xdp_prog; 2043 2044 rcu_read_lock(); 2045 xdp_prog = rcu_dereference(rq->xdp_prog); 2046 if (xdp_prog) { 2047 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, 2048 xdp_headroom, len, xdp_xmit, 2049 stats); 2050 rcu_read_unlock(); 2051 return skb; 2052 } 2053 rcu_read_unlock(); 2054 } 2055 2056 skb = receive_small_build_skb(vi, xdp_headroom, buf, len); 2057 if (likely(skb)) 2058 return skb; 2059 2060 err: 2061 u64_stats_inc(&stats->drops); 2062 put_page(page); 2063 return NULL; 2064 } 2065 2066 static struct sk_buff *receive_big(struct net_device *dev, 2067 struct virtnet_info *vi, 2068 struct receive_queue *rq, 2069 void *buf, 2070 unsigned int len, 2071 struct virtnet_rq_stats *stats) 2072 { 2073 struct page *page = buf; 2074 struct sk_buff *skb = 2075 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); 2076 2077 u64_stats_add(&stats->bytes, len - vi->hdr_len); 2078 if (unlikely(!skb)) 2079 goto err; 2080 2081 return skb; 2082 2083 err: 2084 u64_stats_inc(&stats->drops); 2085 give_pages(rq, page); 2086 return NULL; 2087 } 2088 2089 static void mergeable_buf_free(struct receive_queue *rq, int num_buf, 2090 struct net_device *dev, 2091 struct virtnet_rq_stats *stats) 2092 { 2093 struct page *page; 2094 void *buf; 2095 int len; 2096 2097 while (num_buf-- > 1) { 2098 buf = virtnet_rq_get_buf(rq, &len, NULL); 2099 if (unlikely(!buf)) { 2100 pr_debug("%s: rx error: %d buffers missing\n", 2101 dev->name, num_buf); 2102 DEV_STATS_INC(dev, rx_length_errors); 2103 break; 2104 } 2105 u64_stats_add(&stats->bytes, len); 2106 page = virt_to_head_page(buf); 2107 put_page(page); 2108 } 2109 } 2110 2111 /* Why not use xdp_build_skb_from_frame() ? 2112 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in 2113 * virtio-net there are 2 points that do not match its requirements: 2114 * 1. The size of the prefilled buffer is not fixed before xdp is set. 2115 * 2. xdp_build_skb_from_frame() does more checks that we don't need, 2116 * like eth_type_trans() (which virtio-net does in receive_buf()). 2117 */ 2118 static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev, 2119 struct virtnet_info *vi, 2120 struct xdp_buff *xdp, 2121 unsigned int xdp_frags_truesz) 2122 { 2123 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 2124 unsigned int headroom, data_len; 2125 struct sk_buff *skb; 2126 int metasize; 2127 u8 nr_frags; 2128 2129 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { 2130 pr_debug("Error building skb as missing reserved tailroom for xdp"); 2131 return NULL; 2132 } 2133 2134 if (unlikely(xdp_buff_has_frags(xdp))) 2135 nr_frags = sinfo->nr_frags; 2136 2137 skb = build_skb(xdp->data_hard_start, xdp->frame_sz); 2138 if (unlikely(!skb)) 2139 return NULL; 2140 2141 headroom = xdp->data - xdp->data_hard_start; 2142 data_len = xdp->data_end - xdp->data; 2143 skb_reserve(skb, headroom); 2144 __skb_put(skb, data_len); 2145 2146 metasize = xdp->data - xdp->data_meta; 2147 metasize = metasize > 0 ? 
metasize : 0; 2148 if (metasize) 2149 skb_metadata_set(skb, metasize); 2150 2151 if (unlikely(xdp_buff_has_frags(xdp))) 2152 xdp_update_skb_shared_info(skb, nr_frags, 2153 sinfo->xdp_frags_size, 2154 xdp_frags_truesz, 2155 xdp_buff_is_frag_pfmemalloc(xdp)); 2156 2157 return skb; 2158 } 2159 2160 /* TODO: build xdp in big mode */ 2161 static int virtnet_build_xdp_buff_mrg(struct net_device *dev, 2162 struct virtnet_info *vi, 2163 struct receive_queue *rq, 2164 struct xdp_buff *xdp, 2165 void *buf, 2166 unsigned int len, 2167 unsigned int frame_sz, 2168 int *num_buf, 2169 unsigned int *xdp_frags_truesize, 2170 struct virtnet_rq_stats *stats) 2171 { 2172 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 2173 struct skb_shared_info *shinfo; 2174 unsigned int xdp_frags_truesz = 0; 2175 unsigned int truesize; 2176 struct page *page; 2177 skb_frag_t *frag; 2178 int offset; 2179 void *ctx; 2180 2181 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); 2182 xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM, 2183 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); 2184 2185 if (!*num_buf) 2186 return 0; 2187 2188 if (*num_buf > 1) { 2189 /* To build a multi-buffer xdp_buff, we need to set 2190 * the XDP_FLAGS_HAS_FRAGS bit in the 2191 * xdp_buff flags. 2192 */ 2193 if (!xdp_buff_has_frags(xdp)) 2194 xdp_buff_set_frags_flag(xdp); 2195 2196 shinfo = xdp_get_shared_info_from_buff(xdp); 2197 shinfo->nr_frags = 0; 2198 shinfo->xdp_frags_size = 0; 2199 } 2200 2201 if (*num_buf > MAX_SKB_FRAGS + 1) 2202 return -EINVAL; 2203 2204 while (--*num_buf > 0) { 2205 buf = virtnet_rq_get_buf(rq, &len, &ctx); 2206 if (unlikely(!buf)) { 2207 pr_debug("%s: rx error: %d buffers out of %d missing\n", 2208 dev->name, *num_buf, 2209 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); 2210 DEV_STATS_INC(dev, rx_length_errors); 2211 goto err; 2212 } 2213 2214 u64_stats_add(&stats->bytes, len); 2215 page = virt_to_head_page(buf); 2216 offset = buf - page_address(page); 2217 2218 if (check_mergeable_len(dev, ctx, len)) { 2219 put_page(page); 2220 goto err; 2221 } 2222 2223 truesize = mergeable_ctx_to_truesize(ctx); 2224 xdp_frags_truesz += truesize; 2225 2226 frag = &shinfo->frags[shinfo->nr_frags++]; 2227 skb_frag_fill_page_desc(frag, page, offset, len); 2228 if (page_is_pfmemalloc(page)) 2229 xdp_buff_set_frag_pfmemalloc(xdp); 2230 2231 shinfo->xdp_frags_size += len; 2232 } 2233 2234 *xdp_frags_truesize = xdp_frags_truesz; 2235 return 0; 2236 2237 err: 2238 put_xdp_frags(xdp); 2239 return -EINVAL; 2240 } 2241 2242 static void *mergeable_xdp_get_buf(struct virtnet_info *vi, 2243 struct receive_queue *rq, 2244 struct bpf_prog *xdp_prog, 2245 void *ctx, 2246 unsigned int *frame_sz, 2247 int *num_buf, 2248 struct page **page, 2249 int offset, 2250 unsigned int *len, 2251 struct virtio_net_hdr_mrg_rxbuf *hdr) 2252 { 2253 unsigned int truesize = mergeable_ctx_to_truesize(ctx); 2254 unsigned int headroom = mergeable_ctx_to_headroom(ctx); 2255 struct page *xdp_page; 2256 unsigned int xdp_room; 2257 2258 /* Transient failure which in theory could occur if 2259 * in-flight packets from before XDP was enabled reach 2260 * the receive path after XDP is loaded. 2261 */ 2262 if (unlikely(hdr->hdr.gso_type)) 2263 return NULL; 2264 2265 /* Partially checksummed packets must be dropped. */ 2266 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) 2267 return NULL; 2268 2269 /* The XDP core assumes the frag size is PAGE_SIZE, but buffers 2270 * with headroom may add a hole in truesize, which 2271 * makes their length exceed PAGE_SIZE.
So we disabled the 2272 * hole mechanism for xdp. See add_recvbuf_mergeable(). 2273 */ 2274 *frame_sz = truesize; 2275 2276 if (likely(headroom >= virtnet_get_headroom(vi) && 2277 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) { 2278 return page_address(*page) + offset; 2279 } 2280 2281 /* This happens when the headroom is not enough because 2282 * the buffer was prefilled before XDP was set. 2283 * This should only happen for the first several packets. 2284 * In fact, vq reset can be used here to help us clean up 2285 * the prefilled buffers, but many existing devices do not 2286 * support it, and we don't want to bother users who are 2287 * using xdp normally. 2288 */ 2289 if (!xdp_prog->aux->xdp_has_frags) { 2290 /* linearize data for XDP */ 2291 xdp_page = xdp_linearize_page(vi->dev, rq, num_buf, 2292 *page, offset, 2293 XDP_PACKET_HEADROOM, 2294 len); 2295 if (!xdp_page) 2296 return NULL; 2297 } else { 2298 xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM + 2299 sizeof(struct skb_shared_info)); 2300 if (*len + xdp_room > PAGE_SIZE) 2301 return NULL; 2302 2303 xdp_page = alloc_page(GFP_ATOMIC); 2304 if (!xdp_page) 2305 return NULL; 2306 2307 memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM, 2308 page_address(*page) + offset, *len); 2309 } 2310 2311 *frame_sz = PAGE_SIZE; 2312 2313 put_page(*page); 2314 2315 *page = xdp_page; 2316 2317 return page_address(*page) + XDP_PACKET_HEADROOM; 2318 } 2319 2320 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev, 2321 struct virtnet_info *vi, 2322 struct receive_queue *rq, 2323 struct bpf_prog *xdp_prog, 2324 void *buf, 2325 void *ctx, 2326 unsigned int len, 2327 unsigned int *xdp_xmit, 2328 struct virtnet_rq_stats *stats) 2329 { 2330 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 2331 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 2332 struct page *page = virt_to_head_page(buf); 2333 int offset = buf - page_address(page); 2334 unsigned int xdp_frags_truesz = 0; 2335 struct sk_buff *head_skb; 2336 unsigned int frame_sz; 2337 struct xdp_buff xdp; 2338 void *data; 2339 u32 act; 2340 int err; 2341 2342 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page, 2343 offset, &len, hdr); 2344 if (unlikely(!data)) 2345 goto err_xdp; 2346 2347 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, 2348 &num_buf, &xdp_frags_truesz, stats); 2349 if (unlikely(err)) 2350 goto err_xdp; 2351 2352 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats); 2353 2354 switch (act) { 2355 case XDP_PASS: 2356 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); 2357 if (unlikely(!head_skb)) 2358 break; 2359 return head_skb; 2360 2361 case XDP_TX: 2362 case XDP_REDIRECT: 2363 return NULL; 2364 2365 default: 2366 break; 2367 } 2368 2369 put_xdp_frags(&xdp); 2370 2371 err_xdp: 2372 put_page(page); 2373 mergeable_buf_free(rq, num_buf, dev, stats); 2374 2375 u64_stats_inc(&stats->xdp_drops); 2376 u64_stats_inc(&stats->drops); 2377 return NULL; 2378 } 2379 2380 static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb, 2381 struct sk_buff *curr_skb, 2382 struct page *page, void *buf, 2383 int len, int truesize) 2384 { 2385 int num_skb_frags; 2386 int offset; 2387 2388 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; 2389 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { 2390 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); 2391 2392 if (unlikely(!nskb)) 2393 return NULL; 2394 2395 if (curr_skb == head_skb) 2396 skb_shinfo(curr_skb)->frag_list = nskb; 2397 else 2398
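/* Later overflow skbs are linked via ->next; only the first
 * one hangs off head_skb's frag_list.
 */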
curr_skb->next = nskb; 2399 curr_skb = nskb; 2400 head_skb->truesize += nskb->truesize; 2401 num_skb_frags = 0; 2402 } 2403 2404 if (curr_skb != head_skb) { 2405 head_skb->data_len += len; 2406 head_skb->len += len; 2407 head_skb->truesize += truesize; 2408 } 2409 2410 offset = buf - page_address(page); 2411 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { 2412 put_page(page); 2413 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, 2414 len, truesize); 2415 } else { 2416 skb_add_rx_frag(curr_skb, num_skb_frags, page, 2417 offset, len, truesize); 2418 } 2419 2420 return curr_skb; 2421 } 2422 2423 static struct sk_buff *receive_mergeable(struct net_device *dev, 2424 struct virtnet_info *vi, 2425 struct receive_queue *rq, 2426 void *buf, 2427 void *ctx, 2428 unsigned int len, 2429 unsigned int *xdp_xmit, 2430 struct virtnet_rq_stats *stats) 2431 { 2432 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 2433 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 2434 struct page *page = virt_to_head_page(buf); 2435 int offset = buf - page_address(page); 2436 struct sk_buff *head_skb, *curr_skb; 2437 unsigned int truesize = mergeable_ctx_to_truesize(ctx); 2438 unsigned int headroom = mergeable_ctx_to_headroom(ctx); 2439 2440 head_skb = NULL; 2441 u64_stats_add(&stats->bytes, len - vi->hdr_len); 2442 2443 if (check_mergeable_len(dev, ctx, len)) 2444 goto err_skb; 2445 2446 if (unlikely(vi->xdp_enabled)) { 2447 struct bpf_prog *xdp_prog; 2448 2449 rcu_read_lock(); 2450 xdp_prog = rcu_dereference(rq->xdp_prog); 2451 if (xdp_prog) { 2452 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, 2453 len, xdp_xmit, stats); 2454 rcu_read_unlock(); 2455 return head_skb; 2456 } 2457 rcu_read_unlock(); 2458 } 2459 2460 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); 2461 curr_skb = head_skb; 2462 2463 if (unlikely(!curr_skb)) 2464 goto err_skb; 2465 while (--num_buf) { 2466 buf = virtnet_rq_get_buf(rq, &len, &ctx); 2467 if (unlikely(!buf)) { 2468 pr_debug("%s: rx error: %d buffers out of %d missing\n", 2469 dev->name, num_buf, 2470 virtio16_to_cpu(vi->vdev, 2471 hdr->num_buffers)); 2472 DEV_STATS_INC(dev, rx_length_errors); 2473 goto err_buf; 2474 } 2475 2476 u64_stats_add(&stats->bytes, len); 2477 page = virt_to_head_page(buf); 2478 2479 if (check_mergeable_len(dev, ctx, len)) 2480 goto err_skb; 2481 2482 truesize = mergeable_ctx_to_truesize(ctx); 2483 curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page, 2484 buf, len, truesize); 2485 if (!curr_skb) 2486 goto err_skb; 2487 } 2488 2489 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); 2490 return head_skb; 2491 2492 err_skb: 2493 put_page(page); 2494 mergeable_buf_free(rq, num_buf, dev, stats); 2495 2496 err_buf: 2497 u64_stats_inc(&stats->drops); 2498 dev_kfree_skb(head_skb); 2499 return NULL; 2500 } 2501 2502 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash, 2503 struct sk_buff *skb) 2504 { 2505 enum pkt_hash_types rss_hash_type; 2506 2507 if (!hdr_hash || !skb) 2508 return; 2509 2510 switch (__le16_to_cpu(hdr_hash->hash_report)) { 2511 case VIRTIO_NET_HASH_REPORT_TCPv4: 2512 case VIRTIO_NET_HASH_REPORT_UDPv4: 2513 case VIRTIO_NET_HASH_REPORT_TCPv6: 2514 case VIRTIO_NET_HASH_REPORT_UDPv6: 2515 case VIRTIO_NET_HASH_REPORT_TCPv6_EX: 2516 case VIRTIO_NET_HASH_REPORT_UDPv6_EX: 2517 rss_hash_type = PKT_HASH_TYPE_L4; 2518 break; 2519 case VIRTIO_NET_HASH_REPORT_IPv4: 2520 case VIRTIO_NET_HASH_REPORT_IPv6: 2521 case VIRTIO_NET_HASH_REPORT_IPv6_EX: 2522 rss_hash_type = 
PKT_HASH_TYPE_L3; 2523 break; 2524 case VIRTIO_NET_HASH_REPORT_NONE: 2525 default: 2526 rss_hash_type = PKT_HASH_TYPE_NONE; 2527 } 2528 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); 2529 } 2530 2531 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq, 2532 struct sk_buff *skb, u8 flags) 2533 { 2534 struct virtio_net_common_hdr *hdr; 2535 struct net_device *dev = vi->dev; 2536 2537 hdr = skb_vnet_common_hdr(skb); 2538 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) 2539 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); 2540 2541 if (flags & VIRTIO_NET_HDR_F_DATA_VALID) 2542 skb->ip_summed = CHECKSUM_UNNECESSARY; 2543 2544 if (virtio_net_hdr_to_skb(skb, &hdr->hdr, 2545 virtio_is_little_endian(vi->vdev))) { 2546 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", 2547 dev->name, hdr->hdr.gso_type, 2548 hdr->hdr.gso_size); 2549 goto frame_err; 2550 } 2551 2552 skb_record_rx_queue(skb, vq2rxq(rq->vq)); 2553 skb->protocol = eth_type_trans(skb, dev); 2554 pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 2555 ntohs(skb->protocol), skb->len, skb->pkt_type); 2556 2557 napi_gro_receive(&rq->napi, skb); 2558 return; 2559 2560 frame_err: 2561 DEV_STATS_INC(dev, rx_frame_errors); 2562 dev_kfree_skb(skb); 2563 } 2564 2565 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, 2566 void *buf, unsigned int len, void **ctx, 2567 unsigned int *xdp_xmit, 2568 struct virtnet_rq_stats *stats) 2569 { 2570 struct net_device *dev = vi->dev; 2571 struct sk_buff *skb; 2572 u8 flags; 2573 2574 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { 2575 pr_debug("%s: short packet %i\n", dev->name, len); 2576 DEV_STATS_INC(dev, rx_length_errors); 2577 virtnet_rq_free_buf(vi, rq, buf); 2578 return; 2579 } 2580 2581 /* 1. Save the flags early, as the XDP program might overwrite them. 2582 * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID 2583 * stay valid after XDP processing. 2584 * 2. XDP doesn't work with partially checksummed packets (refer to 2585 * virtnet_xdp_set()), so packets marked as 2586 * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing. 2587 */ 2588 flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; 2589 2590 if (vi->mergeable_rx_bufs) 2591 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, 2592 stats); 2593 else if (vi->big_packets) 2594 skb = receive_big(dev, vi, rq, buf, len, stats); 2595 else 2596 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); 2597 2598 if (unlikely(!skb)) 2599 return; 2600 2601 virtnet_receive_done(vi, rq, skb, flags); 2602 } 2603 2604 /* Unlike mergeable buffers, all buffers are allocated to the 2605 * same size, except for the headroom. For this reason we do 2606 * not need to use mergeable_len_to_ctx here - it is enough 2607 * to store the headroom as the context ignoring the truesize. 
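 * The headroom simply travels through the virtqueue as a fake
 * pointer:
 *
 *   void *ctx = (void *)(unsigned long)xdp_headroom;   (below)
 *   unsigned int xdp_headroom = (unsigned long)ctx;    (receive_small())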
2608 */ 2609 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, 2610 gfp_t gfp) 2611 { 2612 char *buf; 2613 unsigned int xdp_headroom = virtnet_get_headroom(vi); 2614 void *ctx = (void *)(unsigned long)xdp_headroom; 2615 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; 2616 int err; 2617 2618 len = SKB_DATA_ALIGN(len) + 2619 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 2620 2621 if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp))) 2622 return -ENOMEM; 2623 2624 buf = virtnet_rq_alloc(rq, len, gfp); 2625 if (unlikely(!buf)) 2626 return -ENOMEM; 2627 2628 buf += VIRTNET_RX_PAD + xdp_headroom; 2629 2630 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN); 2631 2632 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); 2633 if (err < 0) { 2634 virtnet_rq_unmap(rq, buf, 0); 2635 put_page(virt_to_head_page(buf)); 2636 } 2637 2638 return err; 2639 } 2640 2641 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, 2642 gfp_t gfp) 2643 { 2644 struct page *first, *list = NULL; 2645 char *p; 2646 int i, err, offset; 2647 2648 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); 2649 2650 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ 2651 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { 2652 first = get_a_page(rq, gfp); 2653 if (!first) { 2654 if (list) 2655 give_pages(rq, list); 2656 return -ENOMEM; 2657 } 2658 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); 2659 2660 /* chain new page in list head to match sg */ 2661 first->private = (unsigned long)list; 2662 list = first; 2663 } 2664 2665 first = get_a_page(rq, gfp); 2666 if (!first) { 2667 give_pages(rq, list); 2668 return -ENOMEM; 2669 } 2670 p = page_address(first); 2671 2672 /* rq->sg[0], rq->sg[1] share the same page */ 2673 /* a separated rq->sg[0] for header - required in case !any_header_sg */ 2674 sg_set_buf(&rq->sg[0], p, vi->hdr_len); 2675 2676 /* rq->sg[1] for data packet, from offset */ 2677 offset = sizeof(struct padded_vnet_hdr); 2678 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); 2679 2680 /* chain first in list head */ 2681 first->private = (unsigned long)list; 2682 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, 2683 first, gfp); 2684 if (err < 0) 2685 give_pages(rq, first); 2686 2687 return err; 2688 } 2689 2690 static unsigned int get_mergeable_buf_len(struct receive_queue *rq, 2691 struct ewma_pkt_len *avg_pkt_len, 2692 unsigned int room) 2693 { 2694 struct virtnet_info *vi = rq->vq->vdev->priv; 2695 const size_t hdr_len = vi->hdr_len; 2696 unsigned int len; 2697 2698 if (room) 2699 return PAGE_SIZE - room; 2700 2701 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 2702 rq->min_buf_len, PAGE_SIZE - hdr_len); 2703 2704 return ALIGN(len, L1_CACHE_BYTES); 2705 } 2706 2707 static int add_recvbuf_mergeable(struct virtnet_info *vi, 2708 struct receive_queue *rq, gfp_t gfp) 2709 { 2710 struct page_frag *alloc_frag = &rq->alloc_frag; 2711 unsigned int headroom = virtnet_get_headroom(vi); 2712 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; 2713 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); 2714 unsigned int len, hole; 2715 void *ctx; 2716 char *buf; 2717 int err; 2718 2719 /* Extra tailroom is needed to satisfy XDP's assumption. This 2720 * means rx frags coalescing won't work, but consider we've 2721 * disabled GSO for XDP, it won't be a big issue. 
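 * Note that headroom, and hence room, is only non-zero while an XDP
 * program is attached (see virtnet_get_headroom()): the extra
 * XDP_PACKET_HEADROOM plus the skb_shared_info tailroom let the
 * buffer back an xdp_buff directly.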
2722 */ 2723 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); 2724 2725 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) 2726 return -ENOMEM; 2727 2728 if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size) 2729 len -= sizeof(struct virtnet_rq_dma); 2730 2731 buf = virtnet_rq_alloc(rq, len + room, gfp); 2732 if (unlikely(!buf)) 2733 return -ENOMEM; 2734 2735 buf += headroom; /* advance address leaving hole at front of pkt */ 2736 hole = alloc_frag->size - alloc_frag->offset; 2737 if (hole < len + room) { 2738 /* To avoid internal fragmentation, if there is very likely not 2739 * enough space for another buffer, add the remaining space to 2740 * the current buffer. 2741 * XDP core assumes that frame_size of xdp_buff and the length 2742 * of the frag are PAGE_SIZE, so we disable the hole mechanism. 2743 */ 2744 if (!headroom) 2745 len += hole; 2746 alloc_frag->offset += hole; 2747 } 2748 2749 virtnet_rq_init_one_sg(rq, buf, len); 2750 2751 ctx = mergeable_len_to_ctx(len + room, headroom); 2752 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); 2753 if (err < 0) { 2754 virtnet_rq_unmap(rq, buf, 0); 2755 put_page(virt_to_head_page(buf)); 2756 } 2757 2758 return err; 2759 } 2760 2761 /* 2762 * Returns false if we couldn't fill entirely (OOM). 2763 * 2764 * Normally run in the receive path, but can also be run from ndo_open 2765 * before we're receiving packets, or from refill_work which is 2766 * careful to disable receiving (using napi_disable). 2767 */ 2768 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, 2769 gfp_t gfp) 2770 { 2771 int err; 2772 2773 if (rq->xsk_pool) { 2774 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); 2775 goto kick; 2776 } 2777 2778 do { 2779 if (vi->mergeable_rx_bufs) 2780 err = add_recvbuf_mergeable(vi, rq, gfp); 2781 else if (vi->big_packets) 2782 err = add_recvbuf_big(vi, rq, gfp); 2783 else 2784 err = add_recvbuf_small(vi, rq, gfp); 2785 2786 if (err) 2787 break; 2788 } while (rq->vq->num_free); 2789 2790 kick: 2791 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { 2792 unsigned long flags; 2793 2794 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); 2795 u64_stats_inc(&rq->stats.kicks); 2796 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); 2797 } 2798 2799 return err != -ENOMEM; 2800 } 2801 2802 static void skb_recv_done(struct virtqueue *rvq) 2803 { 2804 struct virtnet_info *vi = rvq->vdev->priv; 2805 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; 2806 2807 rq->calls++; 2808 virtqueue_napi_schedule(&rq->napi, rvq); 2809 } 2810 2811 static void virtnet_napi_do_enable(struct virtqueue *vq, 2812 struct napi_struct *napi) 2813 { 2814 napi_enable(napi); 2815 2816 /* If all buffers were filled by other side before we napi_enabled, we 2817 * won't get another interrupt, so process any outstanding packets now. 2818 * Call local_bh_enable after to trigger softIRQ processing. 
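 * (napi_schedule() from process context only raises NET_RX_SOFTIRQ;
 * nothing would run it until the next interrupt or softirq exit, so
 * the local_bh_disable()/local_bh_enable() pair below makes the
 * pending softirq run right away.)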
2819 */ 2820 local_bh_disable(); 2821 virtqueue_napi_schedule(napi, vq); 2822 local_bh_enable(); 2823 } 2824 2825 static void virtnet_napi_enable(struct receive_queue *rq) 2826 { 2827 struct virtnet_info *vi = rq->vq->vdev->priv; 2828 int qidx = vq2rxq(rq->vq); 2829 2830 virtnet_napi_do_enable(rq->vq, &rq->napi); 2831 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi); 2832 } 2833 2834 static void virtnet_napi_tx_enable(struct send_queue *sq) 2835 { 2836 struct virtnet_info *vi = sq->vq->vdev->priv; 2837 struct napi_struct *napi = &sq->napi; 2838 int qidx = vq2txq(sq->vq); 2839 2840 if (!napi->weight) 2841 return; 2842 2843 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only 2844 * enable the feature if this is likely affine with the transmit path. 2845 */ 2846 if (!vi->affinity_hint_set) { 2847 napi->weight = 0; 2848 return; 2849 } 2850 2851 virtnet_napi_do_enable(sq->vq, napi); 2852 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi); 2853 } 2854 2855 static void virtnet_napi_tx_disable(struct send_queue *sq) 2856 { 2857 struct virtnet_info *vi = sq->vq->vdev->priv; 2858 struct napi_struct *napi = &sq->napi; 2859 int qidx = vq2txq(sq->vq); 2860 2861 if (napi->weight) { 2862 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL); 2863 napi_disable(napi); 2864 } 2865 } 2866 2867 static void virtnet_napi_disable(struct receive_queue *rq) 2868 { 2869 struct virtnet_info *vi = rq->vq->vdev->priv; 2870 struct napi_struct *napi = &rq->napi; 2871 int qidx = vq2rxq(rq->vq); 2872 2873 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL); 2874 napi_disable(napi); 2875 } 2876 2877 static void refill_work(struct work_struct *work) 2878 { 2879 struct virtnet_info *vi = 2880 container_of(work, struct virtnet_info, refill.work); 2881 bool still_empty; 2882 int i; 2883 2884 for (i = 0; i < vi->curr_queue_pairs; i++) { 2885 struct receive_queue *rq = &vi->rq[i]; 2886 2887 /* 2888 * When queue API support is added in the future and the call 2889 * below becomes napi_disable_locked, this driver will need to 2890 * be refactored. 2891 * 2892 * One possible solution would be to: 2893 * - cancel refill_work with cancel_delayed_work (note: 2894 * non-sync) 2895 * - cancel refill_work with cancel_delayed_work_sync in 2896 * virtnet_remove after the netdev is unregistered 2897 * - wrap all of the work in a lock (perhaps the netdev 2898 * instance lock) 2899 * - check netif_running() and return early to avoid a race 2900 */ 2901 napi_disable(&rq->napi); 2902 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); 2903 virtnet_napi_do_enable(rq->vq, &rq->napi); 2904 2905 /* In theory, this can happen: if we don't get any buffers in 2906 * we will *never* try to fill again. 
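 * Hence the delayed retry below (HZ/2, i.e. half a second) instead
 * of an immediate reschedule, so a persistent allocation failure
 * does not turn this work item into a busy loop.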
2907 */ 2908 if (still_empty) 2909 schedule_delayed_work(&vi->refill, HZ/2); 2910 } 2911 } 2912 2913 static int virtnet_receive_xsk_bufs(struct virtnet_info *vi, 2914 struct receive_queue *rq, 2915 int budget, 2916 unsigned int *xdp_xmit, 2917 struct virtnet_rq_stats *stats) 2918 { 2919 unsigned int len; 2920 int packets = 0; 2921 void *buf; 2922 2923 while (packets < budget) { 2924 buf = virtqueue_get_buf(rq->vq, &len); 2925 if (!buf) 2926 break; 2927 2928 virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats); 2929 packets++; 2930 } 2931 2932 return packets; 2933 } 2934 2935 static int virtnet_receive_packets(struct virtnet_info *vi, 2936 struct receive_queue *rq, 2937 int budget, 2938 unsigned int *xdp_xmit, 2939 struct virtnet_rq_stats *stats) 2940 { 2941 unsigned int len; 2942 int packets = 0; 2943 void *buf; 2944 2945 if (!vi->big_packets || vi->mergeable_rx_bufs) { 2946 void *ctx; 2947 while (packets < budget && 2948 (buf = virtnet_rq_get_buf(rq, &len, &ctx))) { 2949 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats); 2950 packets++; 2951 } 2952 } else { 2953 while (packets < budget && 2954 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { 2955 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats); 2956 packets++; 2957 } 2958 } 2959 2960 return packets; 2961 } 2962 2963 static int virtnet_receive(struct receive_queue *rq, int budget, 2964 unsigned int *xdp_xmit) 2965 { 2966 struct virtnet_info *vi = rq->vq->vdev->priv; 2967 struct virtnet_rq_stats stats = {}; 2968 int i, packets; 2969 2970 if (rq->xsk_pool) 2971 packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats); 2972 else 2973 packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats); 2974 2975 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { 2976 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { 2977 spin_lock(&vi->refill_lock); 2978 if (vi->refill_enabled) 2979 schedule_delayed_work(&vi->refill, 0); 2980 spin_unlock(&vi->refill_lock); 2981 } 2982 } 2983 2984 u64_stats_set(&stats.packets, packets); 2985 u64_stats_update_begin(&rq->stats.syncp); 2986 for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) { 2987 size_t offset = virtnet_rq_stats_desc[i].offset; 2988 u64_stats_t *item, *src; 2989 2990 item = (u64_stats_t *)((u8 *)&rq->stats + offset); 2991 src = (u64_stats_t *)((u8 *)&stats + offset); 2992 u64_stats_add(item, u64_stats_read(src)); 2993 } 2994 2995 u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets)); 2996 u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes)); 2997 2998 u64_stats_update_end(&rq->stats.syncp); 2999 3000 return packets; 3001 } 3002 3003 static void virtnet_poll_cleantx(struct receive_queue *rq, int budget) 3004 { 3005 struct virtnet_info *vi = rq->vq->vdev->priv; 3006 unsigned int index = vq2rxq(rq->vq); 3007 struct send_queue *sq = &vi->sq[index]; 3008 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 3009 3010 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) 3011 return; 3012 3013 if (__netif_tx_trylock(txq)) { 3014 if (sq->reset) { 3015 __netif_tx_unlock(txq); 3016 return; 3017 } 3018 3019 do { 3020 virtqueue_disable_cb(sq->vq); 3021 free_old_xmit(sq, txq, !!budget); 3022 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 3023 3024 if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) { 3025 if (netif_tx_queue_stopped(txq)) { 3026 u64_stats_update_begin(&sq->stats.syncp); 3027 u64_stats_inc(&sq->stats.wake); 3028 u64_stats_update_end(&sq->stats.syncp); 3029 } 3030 netif_tx_wake_queue(txq); 3031 
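/* MAX_SKB_FRAGS + 2 free descriptors cover a worst-case skb
 * (virtio header + linear part + up to MAX_SKB_FRAGS frags),
 * so it is safe to wake the queue here.
 */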
} 3032 3033 __netif_tx_unlock(txq); 3034 } 3035 } 3036 3037 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq) 3038 { 3039 struct dim_sample cur_sample = {}; 3040 3041 if (!rq->packets_in_napi) 3042 return; 3043 3044 /* Don't need protection when fetching stats, since fetcher and 3045 * updater of the stats are in same context 3046 */ 3047 dim_update_sample(rq->calls, 3048 u64_stats_read(&rq->stats.packets), 3049 u64_stats_read(&rq->stats.bytes), 3050 &cur_sample); 3051 3052 net_dim(&rq->dim, &cur_sample); 3053 rq->packets_in_napi = 0; 3054 } 3055 3056 static int virtnet_poll(struct napi_struct *napi, int budget) 3057 { 3058 struct receive_queue *rq = 3059 container_of(napi, struct receive_queue, napi); 3060 struct virtnet_info *vi = rq->vq->vdev->priv; 3061 struct send_queue *sq; 3062 unsigned int received; 3063 unsigned int xdp_xmit = 0; 3064 bool napi_complete; 3065 3066 virtnet_poll_cleantx(rq, budget); 3067 3068 received = virtnet_receive(rq, budget, &xdp_xmit); 3069 rq->packets_in_napi += received; 3070 3071 if (xdp_xmit & VIRTIO_XDP_REDIR) 3072 xdp_do_flush(); 3073 3074 /* Out of packets? */ 3075 if (received < budget) { 3076 napi_complete = virtqueue_napi_complete(napi, rq->vq, received); 3077 /* Intentionally not taking dim_lock here. This may result in a 3078 * spurious net_dim call. But if that happens virtnet_rx_dim_work 3079 * will not act on the scheduled work. 3080 */ 3081 if (napi_complete && rq->dim_enabled) 3082 virtnet_rx_dim_update(vi, rq); 3083 } 3084 3085 if (xdp_xmit & VIRTIO_XDP_TX) { 3086 sq = virtnet_xdp_get_sq(vi); 3087 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 3088 u64_stats_update_begin(&sq->stats.syncp); 3089 u64_stats_inc(&sq->stats.kicks); 3090 u64_stats_update_end(&sq->stats.syncp); 3091 } 3092 virtnet_xdp_put_sq(vi, sq); 3093 } 3094 3095 return received; 3096 } 3097 3098 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) 3099 { 3100 virtnet_napi_tx_disable(&vi->sq[qp_index]); 3101 virtnet_napi_disable(&vi->rq[qp_index]); 3102 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); 3103 } 3104 3105 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) 3106 { 3107 struct net_device *dev = vi->dev; 3108 int err; 3109 3110 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, 3111 vi->rq[qp_index].napi.napi_id); 3112 if (err < 0) 3113 return err; 3114 3115 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, 3116 MEM_TYPE_PAGE_SHARED, NULL); 3117 if (err < 0) 3118 goto err_xdp_reg_mem_model; 3119 3120 virtnet_napi_enable(&vi->rq[qp_index]); 3121 virtnet_napi_tx_enable(&vi->sq[qp_index]); 3122 3123 return 0; 3124 3125 err_xdp_reg_mem_model: 3126 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); 3127 return err; 3128 } 3129 3130 static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim) 3131 { 3132 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 3133 return; 3134 net_dim_work_cancel(dim); 3135 } 3136 3137 static void virtnet_update_settings(struct virtnet_info *vi) 3138 { 3139 u32 speed; 3140 u8 duplex; 3141 3142 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) 3143 return; 3144 3145 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); 3146 3147 if (ethtool_validate_speed(speed)) 3148 vi->speed = speed; 3149 3150 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); 3151 3152 if (ethtool_validate_duplex(duplex)) 3153 vi->duplex = duplex; 3154 } 3155 3156 static int 
virtnet_open(struct net_device *dev) 3157 { 3158 struct virtnet_info *vi = netdev_priv(dev); 3159 int i, err; 3160 3161 enable_delayed_refill(vi); 3162 3163 for (i = 0; i < vi->max_queue_pairs; i++) { 3164 if (i < vi->curr_queue_pairs) 3165 /* Make sure we have some buffers: if oom use wq. */ 3166 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) 3167 schedule_delayed_work(&vi->refill, 0); 3168 3169 err = virtnet_enable_queue_pair(vi, i); 3170 if (err < 0) 3171 goto err_enable_qp; 3172 } 3173 3174 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 3175 if (vi->status & VIRTIO_NET_S_LINK_UP) 3176 netif_carrier_on(vi->dev); 3177 virtio_config_driver_enable(vi->vdev); 3178 } else { 3179 vi->status = VIRTIO_NET_S_LINK_UP; 3180 netif_carrier_on(dev); 3181 } 3182 3183 return 0; 3184 3185 err_enable_qp: 3186 disable_delayed_refill(vi); 3187 cancel_delayed_work_sync(&vi->refill); 3188 3189 for (i--; i >= 0; i--) { 3190 virtnet_disable_queue_pair(vi, i); 3191 virtnet_cancel_dim(vi, &vi->rq[i].dim); 3192 } 3193 3194 return err; 3195 } 3196 3197 static int virtnet_poll_tx(struct napi_struct *napi, int budget) 3198 { 3199 struct send_queue *sq = container_of(napi, struct send_queue, napi); 3200 struct virtnet_info *vi = sq->vq->vdev->priv; 3201 unsigned int index = vq2txq(sq->vq); 3202 struct netdev_queue *txq; 3203 int opaque, xsk_done = 0; 3204 bool done; 3205 3206 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { 3207 /* We don't need to enable cb for XDP */ 3208 napi_complete_done(napi, 0); 3209 return 0; 3210 } 3211 3212 txq = netdev_get_tx_queue(vi->dev, index); 3213 __netif_tx_lock(txq, raw_smp_processor_id()); 3214 virtqueue_disable_cb(sq->vq); 3215 3216 if (sq->xsk_pool) 3217 xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget); 3218 else 3219 free_old_xmit(sq, txq, !!budget); 3220 3221 if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) { 3222 if (netif_tx_queue_stopped(txq)) { 3223 u64_stats_update_begin(&sq->stats.syncp); 3224 u64_stats_inc(&sq->stats.wake); 3225 u64_stats_update_end(&sq->stats.syncp); 3226 } 3227 netif_tx_wake_queue(txq); 3228 } 3229 3230 if (xsk_done >= budget) { 3231 __netif_tx_unlock(txq); 3232 return budget; 3233 } 3234 3235 opaque = virtqueue_enable_cb_prepare(sq->vq); 3236 3237 done = napi_complete_done(napi, 0); 3238 3239 if (!done) 3240 virtqueue_disable_cb(sq->vq); 3241 3242 __netif_tx_unlock(txq); 3243 3244 if (done) { 3245 if (unlikely(virtqueue_poll(sq->vq, opaque))) { 3246 if (napi_schedule_prep(napi)) { 3247 __netif_tx_lock(txq, raw_smp_processor_id()); 3248 virtqueue_disable_cb(sq->vq); 3249 __netif_tx_unlock(txq); 3250 __napi_schedule(napi); 3251 } 3252 } 3253 } 3254 3255 return 0; 3256 } 3257 3258 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan) 3259 { 3260 struct virtio_net_hdr_mrg_rxbuf *hdr; 3261 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 3262 struct virtnet_info *vi = sq->vq->vdev->priv; 3263 int num_sg; 3264 unsigned hdr_len = vi->hdr_len; 3265 bool can_push; 3266 3267 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 3268 3269 can_push = vi->any_header_sg && 3270 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && 3271 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; 3272 /* Even if we can, don't push here yet as this would skew 3273 * csum_start offset below. 
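 * When can_push holds, the header is instead written into the skb's
 * own headroom (the __skb_push()/__skb_pull() dance below), which
 * saves the separate header scatterlist entry the else branch needs.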
*/ 3274 if (can_push) 3275 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); 3276 else 3277 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr; 3278 3279 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 3280 virtio_is_little_endian(vi->vdev), false, 3281 0)) 3282 return -EPROTO; 3283 3284 if (vi->mergeable_rx_bufs) 3285 hdr->num_buffers = 0; 3286 3287 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); 3288 if (can_push) { 3289 __skb_push(skb, hdr_len); 3290 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); 3291 if (unlikely(num_sg < 0)) 3292 return num_sg; 3293 /* Pull header back to avoid skew in tx bytes calculations. */ 3294 __skb_pull(skb, hdr_len); 3295 } else { 3296 sg_set_buf(sq->sg, hdr, hdr_len); 3297 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); 3298 if (unlikely(num_sg < 0)) 3299 return num_sg; 3300 num_sg++; 3301 } 3302 3303 return virtnet_add_outbuf(sq, num_sg, skb, 3304 orphan ? VIRTNET_XMIT_TYPE_SKB_ORPHAN : VIRTNET_XMIT_TYPE_SKB); 3305 } 3306 3307 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 3308 { 3309 struct virtnet_info *vi = netdev_priv(dev); 3310 int qnum = skb_get_queue_mapping(skb); 3311 struct send_queue *sq = &vi->sq[qnum]; 3312 int err; 3313 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); 3314 bool xmit_more = netdev_xmit_more(); 3315 bool use_napi = sq->napi.weight; 3316 bool kick; 3317 3318 if (!use_napi) 3319 free_old_xmit(sq, txq, false); 3320 else 3321 virtqueue_disable_cb(sq->vq); 3322 3323 /* timestamp packet in software */ 3324 skb_tx_timestamp(skb); 3325 3326 /* Try to transmit */ 3327 err = xmit_skb(sq, skb, !use_napi); 3328 3329 /* This should not happen! */ 3330 if (unlikely(err)) { 3331 DEV_STATS_INC(dev, tx_fifo_errors); 3332 if (net_ratelimit()) 3333 dev_warn(&dev->dev, 3334 "Unexpected TXQ (%d) queue failure: %d\n", 3335 qnum, err); 3336 DEV_STATS_INC(dev, tx_dropped); 3337 dev_kfree_skb_any(skb); 3338 return NETDEV_TX_OK; 3339 } 3340 3341 /* Don't wait up for transmitted skbs to be freed. */ 3342 if (!use_napi) { 3343 skb_orphan(skb); 3344 nf_reset_ct(skb); 3345 } 3346 3347 if (use_napi) 3348 tx_may_stop(vi, dev, sq); 3349 else 3350 check_sq_full_and_disable(vi, dev, sq); 3351 3352 kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) : 3353 !xmit_more || netif_xmit_stopped(txq); 3354 if (kick) { 3355 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { 3356 u64_stats_update_begin(&sq->stats.syncp); 3357 u64_stats_inc(&sq->stats.kicks); 3358 u64_stats_update_end(&sq->stats.syncp); 3359 } 3360 } 3361 3362 if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) 3363 virtqueue_napi_schedule(&sq->napi, sq->vq); 3364 3365 return NETDEV_TX_OK; 3366 } 3367 3368 static void __virtnet_rx_pause(struct virtnet_info *vi, 3369 struct receive_queue *rq) 3370 { 3371 bool running = netif_running(vi->dev); 3372 3373 if (running) { 3374 virtnet_napi_disable(rq); 3375 virtnet_cancel_dim(vi, &rq->dim); 3376 } 3377 } 3378 3379 static void virtnet_rx_pause_all(struct virtnet_info *vi) 3380 { 3381 int i; 3382 3383 /* 3384 * Make sure refill_work does not run concurrently to 3385 * avoid napi_disable race which leads to deadlock.
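	 * (refill_work disables and re-enables NAPI itself; if it ran
	 * concurrently with this path, a second napi_disable() on the same
	 * queue would block until the first caller re-enables it, which it
	 * might never do.)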
3386 */ 3387 disable_delayed_refill(vi); 3388 cancel_delayed_work_sync(&vi->refill); 3389 for (i = 0; i < vi->max_queue_pairs; i++) 3390 __virtnet_rx_pause(vi, &vi->rq[i]); 3391 } 3392 3393 static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq) 3394 { 3395 /* 3396 * Make sure refill_work does not run concurrently to 3397 * avoid napi_disable race which leads to deadlock. 3398 */ 3399 disable_delayed_refill(vi); 3400 cancel_delayed_work_sync(&vi->refill); 3401 __virtnet_rx_pause(vi, rq); 3402 } 3403 3404 static void __virtnet_rx_resume(struct virtnet_info *vi, 3405 struct receive_queue *rq, 3406 bool refill) 3407 { 3408 bool running = netif_running(vi->dev); 3409 bool schedule_refill = false; 3410 3411 if (refill && !try_fill_recv(vi, rq, GFP_KERNEL)) 3412 schedule_refill = true; 3413 if (running) 3414 virtnet_napi_enable(rq); 3415 3416 if (schedule_refill) 3417 schedule_delayed_work(&vi->refill, 0); 3418 } 3419 3420 static void virtnet_rx_resume_all(struct virtnet_info *vi) 3421 { 3422 int i; 3423 3424 enable_delayed_refill(vi); 3425 for (i = 0; i < vi->max_queue_pairs; i++) { 3426 if (i < vi->curr_queue_pairs) 3427 __virtnet_rx_resume(vi, &vi->rq[i], true); 3428 else 3429 __virtnet_rx_resume(vi, &vi->rq[i], false); 3430 } 3431 } 3432 3433 static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq) 3434 { 3435 enable_delayed_refill(vi); 3436 __virtnet_rx_resume(vi, rq, true); 3437 } 3438 3439 static int virtnet_rx_resize(struct virtnet_info *vi, 3440 struct receive_queue *rq, u32 ring_num) 3441 { 3442 int err, qindex; 3443 3444 qindex = rq - vi->rq; 3445 3446 virtnet_rx_pause(vi, rq); 3447 3448 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL); 3449 if (err) 3450 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); 3451 3452 virtnet_rx_resume(vi, rq); 3453 return err; 3454 } 3455 3456 static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq) 3457 { 3458 bool running = netif_running(vi->dev); 3459 struct netdev_queue *txq; 3460 int qindex; 3461 3462 qindex = sq - vi->sq; 3463 3464 if (running) 3465 virtnet_napi_tx_disable(sq); 3466 3467 txq = netdev_get_tx_queue(vi->dev, qindex); 3468 3469 /* 1. wait for all in-flight xmit to complete 3470 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue() 3471 */ 3472 __netif_tx_lock_bh(txq); 3473 3474 /* Prevent rx poll from accessing sq. */ 3475 sq->reset = true; 3476 3477 /* Prevent the upper layer from trying to send packets.
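	 * Stopping the subqueue under the tx lock synchronizes against a
	 * concurrent start_xmit(), which runs with the same queue lock held.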
*/ 3478 netif_stop_subqueue(vi->dev, qindex); 3479 3480 __netif_tx_unlock_bh(txq); 3481 } 3482 3483 static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq) 3484 { 3485 bool running = netif_running(vi->dev); 3486 struct netdev_queue *txq; 3487 int qindex; 3488 3489 qindex = sq - vi->sq; 3490 3491 txq = netdev_get_tx_queue(vi->dev, qindex); 3492 3493 __netif_tx_lock_bh(txq); 3494 sq->reset = false; 3495 netif_tx_wake_queue(txq); 3496 __netif_tx_unlock_bh(txq); 3497 3498 if (running) 3499 virtnet_napi_tx_enable(sq); 3500 } 3501 3502 static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, 3503 u32 ring_num) 3504 { 3505 int qindex, err; 3506 3507 if (ring_num <= MAX_SKB_FRAGS + 2) { 3508 netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n", 3509 ring_num, MAX_SKB_FRAGS + 2); 3510 return -EINVAL; 3511 } 3512 3513 qindex = sq - vi->sq; 3514 3515 virtnet_tx_pause(vi, sq); 3516 3517 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf, 3518 virtnet_sq_free_unused_buf_done); 3519 if (err) 3520 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); 3521 3522 virtnet_tx_resume(vi, sq); 3523 3524 return err; 3525 } 3526 3527 /* 3528 * Send command via the control virtqueue and check status. Commands 3529 * supported by the hypervisor, as indicated by feature bits, should 3530 * never fail unless improperly formatted. 3531 */ 3532 static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd, 3533 struct scatterlist *out, 3534 struct scatterlist *in) 3535 { 3536 struct scatterlist *sgs[5], hdr, stat; 3537 u32 out_num = 0, tmp, in_num = 0; 3538 bool ok; 3539 int ret; 3540 3541 /* Caller should know better */ 3542 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 3543 3544 mutex_lock(&vi->cvq_lock); 3545 vi->ctrl->status = ~0; 3546 vi->ctrl->hdr.class = class; 3547 vi->ctrl->hdr.cmd = cmd; 3548 /* Add header */ 3549 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); 3550 sgs[out_num++] = &hdr; 3551 3552 if (out) 3553 sgs[out_num++] = out; 3554 3555 /* Add return status. */ 3556 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); 3557 sgs[out_num + in_num++] = &stat; 3558 3559 if (in) 3560 sgs[out_num + in_num++] = in; 3561 3562 BUG_ON(out_num + in_num > ARRAY_SIZE(sgs)); 3563 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); 3564 if (ret < 0) { 3565 dev_warn(&vi->vdev->dev, 3566 "Failed to add sgs for command vq: %d.\n", ret); 3567 mutex_unlock(&vi->cvq_lock); 3568 return false; 3569 } 3570 3571 if (unlikely(!virtqueue_kick(vi->cvq))) 3572 goto unlock; 3573 3574 /* Spin for a response, the kick causes an ioport write, trapping 3575 * into the hypervisor, so the request should be handled immediately.
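	 * The wait below spins with cpu_relax() but also calls
	 * cond_resched(), so a slow host does not hog the CPU, and it bails
	 * out if the virtqueue is found to be broken.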
3576 */ 3577 while (!virtqueue_get_buf(vi->cvq, &tmp) && 3578 !virtqueue_is_broken(vi->cvq)) { 3579 cond_resched(); 3580 cpu_relax(); 3581 } 3582 3583 unlock: 3584 ok = vi->ctrl->status == VIRTIO_NET_OK; 3585 mutex_unlock(&vi->cvq_lock); 3586 return ok; 3587 } 3588 3589 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 3590 struct scatterlist *out) 3591 { 3592 return virtnet_send_command_reply(vi, class, cmd, out, NULL); 3593 } 3594 3595 static int virtnet_set_mac_address(struct net_device *dev, void *p) 3596 { 3597 struct virtnet_info *vi = netdev_priv(dev); 3598 struct virtio_device *vdev = vi->vdev; 3599 int ret; 3600 struct sockaddr *addr; 3601 struct scatterlist sg; 3602 3603 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 3604 return -EOPNOTSUPP; 3605 3606 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); 3607 if (!addr) 3608 return -ENOMEM; 3609 3610 ret = eth_prepare_mac_addr_change(dev, addr); 3611 if (ret) 3612 goto out; 3613 3614 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 3615 sg_init_one(&sg, addr->sa_data, dev->addr_len); 3616 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 3617 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { 3618 dev_warn(&vdev->dev, 3619 "Failed to set mac address by vq command.\n"); 3620 ret = -EINVAL; 3621 goto out; 3622 } 3623 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && 3624 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { 3625 unsigned int i; 3626 3627 /* Naturally, this has an atomicity problem. */ 3628 for (i = 0; i < dev->addr_len; i++) 3629 virtio_cwrite8(vdev, 3630 offsetof(struct virtio_net_config, mac) + 3631 i, addr->sa_data[i]); 3632 } 3633 3634 eth_commit_mac_addr_change(dev, p); 3635 ret = 0; 3636 3637 out: 3638 kfree(addr); 3639 return ret; 3640 } 3641 3642 static void virtnet_stats(struct net_device *dev, 3643 struct rtnl_link_stats64 *tot) 3644 { 3645 struct virtnet_info *vi = netdev_priv(dev); 3646 unsigned int start; 3647 int i; 3648 3649 for (i = 0; i < vi->max_queue_pairs; i++) { 3650 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; 3651 struct receive_queue *rq = &vi->rq[i]; 3652 struct send_queue *sq = &vi->sq[i]; 3653 3654 do { 3655 start = u64_stats_fetch_begin(&sq->stats.syncp); 3656 tpackets = u64_stats_read(&sq->stats.packets); 3657 tbytes = u64_stats_read(&sq->stats.bytes); 3658 terrors = u64_stats_read(&sq->stats.tx_timeouts); 3659 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 3660 3661 do { 3662 start = u64_stats_fetch_begin(&rq->stats.syncp); 3663 rpackets = u64_stats_read(&rq->stats.packets); 3664 rbytes = u64_stats_read(&rq->stats.bytes); 3665 rdrops = u64_stats_read(&rq->stats.drops); 3666 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); 3667 3668 tot->rx_packets += rpackets; 3669 tot->tx_packets += tpackets; 3670 tot->rx_bytes += rbytes; 3671 tot->tx_bytes += tbytes; 3672 tot->rx_dropped += rdrops; 3673 tot->tx_errors += terrors; 3674 } 3675 3676 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); 3677 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); 3678 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); 3679 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); 3680 } 3681 3682 static void virtnet_ack_link_announce(struct virtnet_info *vi) 3683 { 3684 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, 3685 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) 3686 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); 3687 } 3688 3689 static bool virtnet_commit_rss_command(struct virtnet_info *vi); 3690 3691 static void 
virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs) 3692 { 3693 u32 indir_val = 0; 3694 int i = 0; 3695 3696 for (; i < vi->rss_indir_table_size; ++i) { 3697 indir_val = ethtool_rxfh_indir_default(i, queue_pairs); 3698 vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val); 3699 } 3700 vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs); 3701 } 3702 3703 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) 3704 { 3705 struct virtio_net_ctrl_mq *mq __free(kfree) = NULL; 3706 struct virtio_net_rss_config_hdr *old_rss_hdr; 3707 struct virtio_net_rss_config_trailer old_rss_trailer; 3708 struct net_device *dev = vi->dev; 3709 struct scatterlist sg; 3710 3711 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 3712 return 0; 3713 3714 /* First check whether we need to update rss. Update only if both (1) rss is enabled and 3715 * (2) there is no user configuration. 3716 * 3717 * During rss command processing, device updates queue_pairs using rss.max_tx_vq. That is, 3718 * the device updates queue_pairs together with rss, so we can skip the separate queue_pairs 3719 * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly. 3720 */ 3721 if (vi->has_rss && !netif_is_rxfh_configured(dev)) { 3722 old_rss_hdr = vi->rss_hdr; 3723 old_rss_trailer = vi->rss_trailer; 3724 vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); 3725 if (!vi->rss_hdr) { 3726 vi->rss_hdr = old_rss_hdr; 3727 return -ENOMEM; 3728 } 3729 3730 *vi->rss_hdr = *old_rss_hdr; 3731 virtnet_rss_update_by_qpairs(vi, queue_pairs); 3732 3733 if (!virtnet_commit_rss_command(vi)) { 3734 /* restore ctrl_rss if commit_rss_command failed */ 3735 devm_kfree(&dev->dev, vi->rss_hdr); 3736 vi->rss_hdr = old_rss_hdr; 3737 vi->rss_trailer = old_rss_trailer; 3738 3739 dev_warn(&dev->dev, "Failed to set num of queue pairs to %d, because committing RSS failed\n", 3740 queue_pairs); 3741 return -EINVAL; 3742 } 3743 devm_kfree(&dev->dev, old_rss_hdr); 3744 goto succ; 3745 } 3746 3747 mq = kzalloc(sizeof(*mq), GFP_KERNEL); 3748 if (!mq) 3749 return -ENOMEM; 3750 3751 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 3752 sg_init_one(&sg, mq, sizeof(*mq)); 3753 3754 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 3755 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { 3756 dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n", 3757 queue_pairs); 3758 return -EINVAL; 3759 } 3760 succ: 3761 vi->curr_queue_pairs = queue_pairs; 3762 /* virtnet_open() will refill when the device goes up. */ 3763 spin_lock_bh(&vi->refill_lock); 3764 if (dev->flags & IFF_UP && vi->refill_enabled) 3765 schedule_delayed_work(&vi->refill, 0); 3766 spin_unlock_bh(&vi->refill_lock); 3767 3768 return 0; 3769 } 3770 3771 static int virtnet_close(struct net_device *dev) 3772 { 3773 struct virtnet_info *vi = netdev_priv(dev); 3774 int i; 3775 3776 /* Make sure NAPI doesn't schedule refill work */ 3777 disable_delayed_refill(vi); 3778 /* Make sure refill_work doesn't re-enable napi!
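	 * The ordering matters: disable_delayed_refill() above prevents the
	 * work from being re-armed, so the synchronous cancel below cannot
	 * race with a new schedule_delayed_work().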
*/ 3779 cancel_delayed_work_sync(&vi->refill); 3780 /* Prevent the config change callback from changing carrier 3781 * after close 3782 */ 3783 virtio_config_driver_disable(vi->vdev); 3784 /* Stop getting status/speed updates: we don't care until next 3785 * open 3786 */ 3787 cancel_work_sync(&vi->config_work); 3788 3789 for (i = 0; i < vi->max_queue_pairs; i++) { 3790 virtnet_disable_queue_pair(vi, i); 3791 virtnet_cancel_dim(vi, &vi->rq[i].dim); 3792 } 3793 3794 netif_carrier_off(dev); 3795 3796 return 0; 3797 } 3798 3799 static void virtnet_rx_mode_work(struct work_struct *work) 3800 { 3801 struct virtnet_info *vi = 3802 container_of(work, struct virtnet_info, rx_mode_work); 3803 u8 *promisc_allmulti __free(kfree) = NULL; 3804 struct net_device *dev = vi->dev; 3805 struct scatterlist sg[2]; 3806 struct virtio_net_ctrl_mac *mac_data; 3807 struct netdev_hw_addr *ha; 3808 int uc_count; 3809 int mc_count; 3810 void *buf; 3811 int i; 3812 3813 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ 3814 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 3815 return; 3816 3817 promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_KERNEL); 3818 if (!promisc_allmulti) { 3819 dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n"); 3820 return; 3821 } 3822 3823 rtnl_lock(); 3824 3825 *promisc_allmulti = !!(dev->flags & IFF_PROMISC); 3826 sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti)); 3827 3828 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 3829 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 3830 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 3831 *promisc_allmulti ? "en" : "dis"); 3832 3833 *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI); 3834 sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti)); 3835 3836 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 3837 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 3838 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 3839 *promisc_allmulti ? 
"en" : "dis"); 3840 3841 netif_addr_lock_bh(dev); 3842 3843 uc_count = netdev_uc_count(dev); 3844 mc_count = netdev_mc_count(dev); 3845 /* MAC filter - use one buffer for both lists */ 3846 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + 3847 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 3848 mac_data = buf; 3849 if (!buf) { 3850 netif_addr_unlock_bh(dev); 3851 rtnl_unlock(); 3852 return; 3853 } 3854 3855 sg_init_table(sg, 2); 3856 3857 /* Store the unicast list and count in the front of the buffer */ 3858 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); 3859 i = 0; 3860 netdev_for_each_uc_addr(ha, dev) 3861 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 3862 3863 sg_set_buf(&sg[0], mac_data, 3864 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); 3865 3866 /* multicast list and count fill the end */ 3867 mac_data = (void *)&mac_data->macs[uc_count][0]; 3868 3869 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); 3870 i = 0; 3871 netdev_for_each_mc_addr(ha, dev) 3872 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 3873 3874 netif_addr_unlock_bh(dev); 3875 3876 sg_set_buf(&sg[1], mac_data, 3877 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 3878 3879 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, 3880 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) 3881 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); 3882 3883 rtnl_unlock(); 3884 3885 kfree(buf); 3886 } 3887 3888 static void virtnet_set_rx_mode(struct net_device *dev) 3889 { 3890 struct virtnet_info *vi = netdev_priv(dev); 3891 3892 if (vi->rx_mode_work_enabled) 3893 schedule_work(&vi->rx_mode_work); 3894 } 3895 3896 static int virtnet_vlan_rx_add_vid(struct net_device *dev, 3897 __be16 proto, u16 vid) 3898 { 3899 struct virtnet_info *vi = netdev_priv(dev); 3900 __virtio16 *_vid __free(kfree) = NULL; 3901 struct scatterlist sg; 3902 3903 _vid = kzalloc(sizeof(*_vid), GFP_KERNEL); 3904 if (!_vid) 3905 return -ENOMEM; 3906 3907 *_vid = cpu_to_virtio16(vi->vdev, vid); 3908 sg_init_one(&sg, _vid, sizeof(*_vid)); 3909 3910 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 3911 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) 3912 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 3913 return 0; 3914 } 3915 3916 static int virtnet_vlan_rx_kill_vid(struct net_device *dev, 3917 __be16 proto, u16 vid) 3918 { 3919 struct virtnet_info *vi = netdev_priv(dev); 3920 __virtio16 *_vid __free(kfree) = NULL; 3921 struct scatterlist sg; 3922 3923 _vid = kzalloc(sizeof(*_vid), GFP_KERNEL); 3924 if (!_vid) 3925 return -ENOMEM; 3926 3927 *_vid = cpu_to_virtio16(vi->vdev, vid); 3928 sg_init_one(&sg, _vid, sizeof(*_vid)); 3929 3930 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 3931 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) 3932 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); 3933 return 0; 3934 } 3935 3936 static void virtnet_clean_affinity(struct virtnet_info *vi) 3937 { 3938 int i; 3939 3940 if (vi->affinity_hint_set) { 3941 for (i = 0; i < vi->max_queue_pairs; i++) { 3942 virtqueue_set_affinity(vi->rq[i].vq, NULL); 3943 virtqueue_set_affinity(vi->sq[i].vq, NULL); 3944 } 3945 3946 vi->affinity_hint_set = false; 3947 } 3948 } 3949 3950 static void virtnet_set_affinity(struct virtnet_info *vi) 3951 { 3952 cpumask_var_t mask; 3953 int stragglers; 3954 int group_size; 3955 int i, start = 0, cpu; 3956 int num_cpu; 3957 int stride; 3958 3959 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { 3960 virtnet_clean_affinity(vi); 3961 return; 3962 } 3963 3964 num_cpu = num_online_cpus(); 3965 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 
1); 3966 stragglers = num_cpu >= vi->curr_queue_pairs ? 3967 num_cpu % vi->curr_queue_pairs : 3968 0; 3969 3970 for (i = 0; i < vi->curr_queue_pairs; i++) { 3971 group_size = stride + (i < stragglers ? 1 : 0); 3972 3973 for_each_online_cpu_wrap(cpu, start) { 3974 if (!group_size--) { 3975 start = cpu; 3976 break; 3977 } 3978 cpumask_set_cpu(cpu, mask); 3979 } 3980 3981 virtqueue_set_affinity(vi->rq[i].vq, mask); 3982 virtqueue_set_affinity(vi->sq[i].vq, mask); 3983 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); 3984 cpumask_clear(mask); 3985 } 3986 3987 vi->affinity_hint_set = true; 3988 free_cpumask_var(mask); 3989 } 3990 3991 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) 3992 { 3993 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 3994 node); 3995 virtnet_set_affinity(vi); 3996 return 0; 3997 } 3998 3999 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) 4000 { 4001 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 4002 node_dead); 4003 virtnet_set_affinity(vi); 4004 return 0; 4005 } 4006 4007 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) 4008 { 4009 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, 4010 node); 4011 4012 virtnet_clean_affinity(vi); 4013 return 0; 4014 } 4015 4016 static enum cpuhp_state virtionet_online; 4017 4018 static int virtnet_cpu_notif_add(struct virtnet_info *vi) 4019 { 4020 int ret; 4021 4022 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); 4023 if (ret) 4024 return ret; 4025 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, 4026 &vi->node_dead); 4027 if (!ret) 4028 return ret; 4029 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 4030 return ret; 4031 } 4032 4033 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) 4034 { 4035 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); 4036 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, 4037 &vi->node_dead); 4038 } 4039 4040 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, 4041 u16 vqn, u32 max_usecs, u32 max_packets) 4042 { 4043 struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL; 4044 struct scatterlist sgs; 4045 4046 coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL); 4047 if (!coal_vq) 4048 return -ENOMEM; 4049 4050 coal_vq->vqn = cpu_to_le16(vqn); 4051 coal_vq->coal.max_usecs = cpu_to_le32(max_usecs); 4052 coal_vq->coal.max_packets = cpu_to_le32(max_packets); 4053 sg_init_one(&sgs, coal_vq, sizeof(*coal_vq)); 4054 4055 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 4056 VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, 4057 &sgs)) 4058 return -EINVAL; 4059 4060 return 0; 4061 } 4062 4063 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi, 4064 u16 queue, u32 max_usecs, 4065 u32 max_packets) 4066 { 4067 int err; 4068 4069 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 4070 return -EOPNOTSUPP; 4071 4072 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), 4073 max_usecs, max_packets); 4074 if (err) 4075 return err; 4076 4077 vi->rq[queue].intr_coal.max_usecs = max_usecs; 4078 vi->rq[queue].intr_coal.max_packets = max_packets; 4079 4080 return 0; 4081 } 4082 4083 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi, 4084 u16 queue, u32 max_usecs, 4085 u32 max_packets) 4086 { 4087 int err; 4088 4089 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 4090 return -EOPNOTSUPP; 4091 4092 err = 
virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), 4093 max_usecs, max_packets); 4094 if (err) 4095 return err; 4096 4097 vi->sq[queue].intr_coal.max_usecs = max_usecs; 4098 vi->sq[queue].intr_coal.max_packets = max_packets; 4099 4100 return 0; 4101 } 4102 4103 static void virtnet_get_ringparam(struct net_device *dev, 4104 struct ethtool_ringparam *ring, 4105 struct kernel_ethtool_ringparam *kernel_ring, 4106 struct netlink_ext_ack *extack) 4107 { 4108 struct virtnet_info *vi = netdev_priv(dev); 4109 4110 ring->rx_max_pending = vi->rq[0].vq->num_max; 4111 ring->tx_max_pending = vi->sq[0].vq->num_max; 4112 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 4113 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 4114 } 4115 4116 static int virtnet_set_ringparam(struct net_device *dev, 4117 struct ethtool_ringparam *ring, 4118 struct kernel_ethtool_ringparam *kernel_ring, 4119 struct netlink_ext_ack *extack) 4120 { 4121 struct virtnet_info *vi = netdev_priv(dev); 4122 u32 rx_pending, tx_pending; 4123 struct receive_queue *rq; 4124 struct send_queue *sq; 4125 int i, err; 4126 4127 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 4128 return -EINVAL; 4129 4130 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 4131 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 4132 4133 if (ring->rx_pending == rx_pending && 4134 ring->tx_pending == tx_pending) 4135 return 0; 4136 4137 if (ring->rx_pending > vi->rq[0].vq->num_max) 4138 return -EINVAL; 4139 4140 if (ring->tx_pending > vi->sq[0].vq->num_max) 4141 return -EINVAL; 4142 4143 for (i = 0; i < vi->max_queue_pairs; i++) { 4144 rq = vi->rq + i; 4145 sq = vi->sq + i; 4146 4147 if (ring->tx_pending != tx_pending) { 4148 err = virtnet_tx_resize(vi, sq, ring->tx_pending); 4149 if (err) 4150 return err; 4151 4152 /* Upon disabling and re-enabling a transmit virtqueue, the device must 4153 * set the coalescing parameters of the virtqueue to those configured 4154 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver 4155 * did not set any TX coalescing parameters, to 0. 4156 */ 4157 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, 4158 vi->intr_coal_tx.max_usecs, 4159 vi->intr_coal_tx.max_packets); 4160 4161 /* Don't break the tx resize action if the vq coalescing is not 4162 * supported. The same is true for rx resize below. 4163 */ 4164 if (err && err != -EOPNOTSUPP) 4165 return err; 4166 } 4167 4168 if (ring->rx_pending != rx_pending) { 4169 err = virtnet_rx_resize(vi, rq, ring->rx_pending); 4170 if (err) 4171 return err; 4172 4173 /* The reason is same as the transmit virtqueue reset */ 4174 mutex_lock(&vi->rq[i].dim_lock); 4175 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, 4176 vi->intr_coal_rx.max_usecs, 4177 vi->intr_coal_rx.max_packets); 4178 mutex_unlock(&vi->rq[i].dim_lock); 4179 if (err && err != -EOPNOTSUPP) 4180 return err; 4181 } 4182 } 4183 4184 return 0; 4185 } 4186 4187 static bool virtnet_commit_rss_command(struct virtnet_info *vi) 4188 { 4189 struct net_device *dev = vi->dev; 4190 struct scatterlist sgs[2]; 4191 4192 /* prepare sgs */ 4193 sg_init_table(sgs, 2); 4194 sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi)); 4195 sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi)); 4196 4197 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 4198 vi->has_rss ? 
VIRTIO_NET_CTRL_MQ_RSS_CONFIG 4199 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) 4200 goto err; 4201 4202 return true; 4203 4204 err: 4205 dev_warn(&dev->dev, "Failed to commit the RSS configuration.\n"); 4206 return false; 4207 4208 } 4209 4210 static void virtnet_init_default_rss(struct virtnet_info *vi) 4211 { 4212 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported); 4213 vi->rss_hash_types_saved = vi->rss_hash_types_supported; 4214 vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size 4215 ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0; 4216 vi->rss_hdr->unclassified_queue = 0; 4217 4218 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); 4219 4220 vi->rss_trailer.hash_key_length = vi->rss_key_size; 4221 4222 netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size); 4223 } 4224 4225 static int virtnet_get_hashflow(struct net_device *dev, 4226 struct ethtool_rxfh_fields *info) 4227 { 4228 struct virtnet_info *vi = netdev_priv(dev); 4229 4230 info->data = 0; 4231 switch (info->flow_type) { 4232 case TCP_V4_FLOW: 4233 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { 4234 info->data = RXH_IP_SRC | RXH_IP_DST | 4235 RXH_L4_B_0_1 | RXH_L4_B_2_3; 4236 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 4237 info->data = RXH_IP_SRC | RXH_IP_DST; 4238 } 4239 break; 4240 case TCP_V6_FLOW: 4241 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { 4242 info->data = RXH_IP_SRC | RXH_IP_DST | 4243 RXH_L4_B_0_1 | RXH_L4_B_2_3; 4244 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 4245 info->data = RXH_IP_SRC | RXH_IP_DST; 4246 } 4247 break; 4248 case UDP_V4_FLOW: 4249 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { 4250 info->data = RXH_IP_SRC | RXH_IP_DST | 4251 RXH_L4_B_0_1 | RXH_L4_B_2_3; 4252 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { 4253 info->data = RXH_IP_SRC | RXH_IP_DST; 4254 } 4255 break; 4256 case UDP_V6_FLOW: 4257 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { 4258 info->data = RXH_IP_SRC | RXH_IP_DST | 4259 RXH_L4_B_0_1 | RXH_L4_B_2_3; 4260 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { 4261 info->data = RXH_IP_SRC | RXH_IP_DST; 4262 } 4263 break; 4264 case IPV4_FLOW: 4265 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) 4266 info->data = RXH_IP_SRC | RXH_IP_DST; 4267 4268 break; 4269 case IPV6_FLOW: 4270 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) 4271 info->data = RXH_IP_SRC | RXH_IP_DST; 4272 4273 break; 4274 default: 4275 info->data = 0; 4276 break; 4277 } 4278 4279 return 0; 4280 } 4281 4282 static int virtnet_set_hashflow(struct net_device *dev, 4283 const struct ethtool_rxfh_fields *info, 4284 struct netlink_ext_ack *extack) 4285 { 4286 struct virtnet_info *vi = netdev_priv(dev); 4287 u32 new_hashtypes = vi->rss_hash_types_saved; 4288 bool is_disable = info->data & RXH_DISCARD; 4289 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); 4290 4291 /* supports only ethtool's 'sd', 'sdfn' and 'r' hash options */ 4292 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) || is_l4 || is_disable)) 4293 return -EINVAL; 4294 4295 switch (info->flow_type) { 4296 case TCP_V4_FLOW: 4297 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4); 4298 if (!is_disable) 4299 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 4300 | (is_l4 ?
VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0); 4301 break; 4302 case UDP_V4_FLOW: 4303 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4); 4304 if (!is_disable) 4305 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4 4306 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0); 4307 break; 4308 case IPV4_FLOW: 4309 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4; 4310 if (!is_disable) 4311 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4; 4312 break; 4313 case TCP_V6_FLOW: 4314 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6); 4315 if (!is_disable) 4316 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 4317 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0); 4318 break; 4319 case UDP_V6_FLOW: 4320 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6); 4321 if (!is_disable) 4322 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6 4323 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0); 4324 break; 4325 case IPV6_FLOW: 4326 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6; 4327 if (!is_disable) 4328 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6; 4329 break; 4330 default: 4331 /* unsupported flow */ 4332 return -EINVAL; 4333 } 4334 4335 /* reject if an unsupported hashtype was set */ 4336 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) 4337 return -EINVAL; 4338 4339 if (new_hashtypes != vi->rss_hash_types_saved) { 4340 vi->rss_hash_types_saved = new_hashtypes; 4341 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); 4342 if (vi->dev->features & NETIF_F_RXHASH) 4343 if (!virtnet_commit_rss_command(vi)) 4344 return -EINVAL; 4345 } 4346 4347 return 0; 4348 } 4349 4350 static void virtnet_get_drvinfo(struct net_device *dev, 4351 struct ethtool_drvinfo *info) 4352 { 4353 struct virtnet_info *vi = netdev_priv(dev); 4354 struct virtio_device *vdev = vi->vdev; 4355 4356 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 4357 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); 4358 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); 4359 4360 } 4361 4362 /* TODO: Eliminate OOO packets during switching */ 4363 static int virtnet_set_channels(struct net_device *dev, 4364 struct ethtool_channels *channels) 4365 { 4366 struct virtnet_info *vi = netdev_priv(dev); 4367 u16 queue_pairs = channels->combined_count; 4368 int err; 4369 4370 /* We don't support separate rx/tx channels. 4371 * We don't allow setting 'other' channels. 4372 */ 4373 if (channels->rx_count || channels->tx_count || channels->other_count) 4374 return -EINVAL; 4375 4376 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) 4377 return -EINVAL; 4378 4379 /* For now we don't support modifying channels while XDP is loaded. 4380 * Also, when XDP is loaded all RX queues have XDP programs, so we only 4381 * need to check a single RX queue.
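* (virtnet_xdp_set() installs or removes the XDP program on every RX queue at once, so checking rq[0] is representative.)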
4382 */ 4383 if (vi->rq[0].xdp_prog) 4384 return -EINVAL; 4385 4386 cpus_read_lock(); 4387 err = virtnet_set_queues(vi, queue_pairs); 4388 if (err) { 4389 cpus_read_unlock(); 4390 goto err; 4391 } 4392 virtnet_set_affinity(vi); 4393 cpus_read_unlock(); 4394 4395 netif_set_real_num_tx_queues(dev, queue_pairs); 4396 netif_set_real_num_rx_queues(dev, queue_pairs); 4397 err: 4398 return err; 4399 } 4400 4401 static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt, 4402 int num, int qid, const struct virtnet_stat_desc *desc) 4403 { 4404 int i; 4405 4406 if (qid < 0) { 4407 for (i = 0; i < num; ++i) 4408 ethtool_sprintf(p, noq_fmt, desc[i].desc); 4409 } else { 4410 for (i = 0; i < num; ++i) 4411 ethtool_sprintf(p, fmt, qid, desc[i].desc); 4412 } 4413 } 4414 4415 /* qid == -1: for the rx/tx queue total fields */ 4416 static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data) 4417 { 4418 const struct virtnet_stat_desc *desc; 4419 const char *fmt, *noq_fmt; 4420 u8 *p = *data; 4421 u32 num; 4422 4423 if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) { 4424 noq_fmt = "cq_hw_%s"; 4425 4426 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { 4427 desc = &virtnet_stats_cvq_desc[0]; 4428 num = ARRAY_SIZE(virtnet_stats_cvq_desc); 4429 4430 virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc); 4431 } 4432 } 4433 4434 if (type == VIRTNET_Q_TYPE_RX) { 4435 fmt = "rx%u_%s"; 4436 noq_fmt = "rx_%s"; 4437 4438 desc = &virtnet_rq_stats_desc[0]; 4439 num = ARRAY_SIZE(virtnet_rq_stats_desc); 4440 4441 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); 4442 4443 fmt = "rx%u_hw_%s"; 4444 noq_fmt = "rx_hw_%s"; 4445 4446 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { 4447 desc = &virtnet_stats_rx_basic_desc[0]; 4448 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc); 4449 4450 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); 4451 } 4452 4453 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { 4454 desc = &virtnet_stats_rx_csum_desc[0]; 4455 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc); 4456 4457 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); 4458 } 4459 4460 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { 4461 desc = &virtnet_stats_rx_speed_desc[0]; 4462 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc); 4463 4464 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); 4465 } 4466 } 4467 4468 if (type == VIRTNET_Q_TYPE_TX) { 4469 fmt = "tx%u_%s"; 4470 noq_fmt = "tx_%s"; 4471 4472 desc = &virtnet_sq_stats_desc[0]; 4473 num = ARRAY_SIZE(virtnet_sq_stats_desc); 4474 4475 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); 4476 4477 fmt = "tx%u_hw_%s"; 4478 noq_fmt = "tx_hw_%s"; 4479 4480 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { 4481 desc = &virtnet_stats_tx_basic_desc[0]; 4482 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc); 4483 4484 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); 4485 } 4486 4487 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { 4488 desc = &virtnet_stats_tx_gso_desc[0]; 4489 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc); 4490 4491 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); 4492 } 4493 4494 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { 4495 desc = &virtnet_stats_tx_speed_desc[0]; 4496 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc); 4497 4498 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); 4499 } 4500 } 4501 4502 *data = p; 4503 } 4504 4505 struct virtnet_stats_ctx { 4506 /* The stats are written to qstats or ethtool -S */ 4507
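/* The three-element arrays below are indexed by queue type: VIRTNET_Q_TYPE_RX, VIRTNET_Q_TYPE_TX or VIRTNET_Q_TYPE_CQ. */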
bool to_qstat; 4508 4509 /* Used to calculate the offset inside the output buffer. */ 4510 u32 desc_num[3]; 4511 4512 /* The actual supported stat types. */ 4513 u64 bitmap[3]; 4514 4515 /* Used to calculate the reply buffer size. */ 4516 u32 size[3]; 4517 4518 /* Record the output buffer. */ 4519 u64 *data; 4520 }; 4521 4522 static void virtnet_stats_ctx_init(struct virtnet_info *vi, 4523 struct virtnet_stats_ctx *ctx, 4524 u64 *data, bool to_qstat) 4525 { 4526 u32 queue_type; 4527 4528 ctx->data = data; 4529 ctx->to_qstat = to_qstat; 4530 4531 if (to_qstat) { 4532 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); 4533 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); 4534 4535 queue_type = VIRTNET_Q_TYPE_RX; 4536 4537 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { 4538 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; 4539 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); 4540 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); 4541 } 4542 4543 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { 4544 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; 4545 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); 4546 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); 4547 } 4548 4549 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { 4550 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO; 4551 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); 4552 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso); 4553 } 4554 4555 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { 4556 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; 4557 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); 4558 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); 4559 } 4560 4561 queue_type = VIRTNET_Q_TYPE_TX; 4562 4563 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { 4564 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; 4565 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); 4566 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); 4567 } 4568 4569 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { 4570 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM; 4571 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); 4572 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum); 4573 } 4574 4575 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { 4576 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; 4577 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); 4578 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); 4579 } 4580 4581 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { 4582 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; 4583 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); 4584 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); 4585 } 4586 4587 return; 4588 } 4589 4590 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc); 4591 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc); 4592 4593 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { 4594 queue_type = VIRTNET_Q_TYPE_CQ; 4595 4596 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ; 4597 ctx->desc_num[queue_type] += 
ARRAY_SIZE(virtnet_stats_cvq_desc); 4598 ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq); 4599 } 4600 4601 queue_type = VIRTNET_Q_TYPE_RX; 4602 4603 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { 4604 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; 4605 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc); 4606 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); 4607 } 4608 4609 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { 4610 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; 4611 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc); 4612 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); 4613 } 4614 4615 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { 4616 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; 4617 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc); 4618 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); 4619 } 4620 4621 queue_type = VIRTNET_Q_TYPE_TX; 4622 4623 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { 4624 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; 4625 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc); 4626 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); 4627 } 4628 4629 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { 4630 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; 4631 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc); 4632 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); 4633 } 4634 4635 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { 4636 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; 4637 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc); 4638 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); 4639 } 4640 } 4641 4642 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq. 
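* Each queue contributes a block of @num consecutive u64 counters laid out back to back; column i of every block is accumulated into sum[i].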
4643 * @sum: the position to store the sum values 4644 * @num: number of fields per queue 4645 * @q_value: pointer to the fields of the first queue 4646 * @q_num: number of queues 4647 */ 4648 static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num) 4649 { 4650 u32 step = num; 4651 int i, j; 4652 u64 *p; 4653 4654 for (i = 0; i < num; ++i) { 4655 p = sum + i; 4656 *p = 0; 4657 4658 for (j = 0; j < q_num; ++j) 4659 *p += *(q_value + i + j * step); 4660 } 4661 } 4662 4663 static void virtnet_fill_total_fields(struct virtnet_info *vi, 4664 struct virtnet_stats_ctx *ctx) 4665 { 4666 u64 *data, *first_rx_q, *first_tx_q; 4667 u32 num_cq, num_rx, num_tx; 4668 4669 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; 4670 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; 4671 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; 4672 4673 first_rx_q = ctx->data + num_rx + num_tx + num_cq; 4674 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx; 4675 4676 data = ctx->data; 4677 4678 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs); 4679 4680 data = ctx->data + num_rx; 4681 4682 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs); 4683 } 4684 4685 static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid, 4686 struct virtnet_stats_ctx *ctx, 4687 const u8 *base, bool drv_stats, u8 reply_type) 4688 { 4689 const struct virtnet_stat_desc *desc; 4690 const u64_stats_t *v_stat; 4691 u64 offset, bitmap; 4692 const __le64 *v; 4693 u32 queue_type; 4694 int i, num; 4695 4696 queue_type = vq_type(vi, qid); 4697 bitmap = ctx->bitmap[queue_type]; 4698 4699 if (drv_stats) { 4700 if (queue_type == VIRTNET_Q_TYPE_RX) { 4701 desc = &virtnet_rq_stats_desc_qstat[0]; 4702 num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); 4703 } else { 4704 desc = &virtnet_sq_stats_desc_qstat[0]; 4705 num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); 4706 } 4707 4708 for (i = 0; i < num; ++i) { 4709 offset = desc[i].qstat_offset / sizeof(*ctx->data); 4710 v_stat = (const u64_stats_t *)(base + desc[i].offset); 4711 ctx->data[offset] = u64_stats_read(v_stat); 4712 } 4713 return; 4714 } 4715 4716 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { 4717 desc = &virtnet_stats_rx_basic_desc_qstat[0]; 4718 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); 4719 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC) 4720 goto found; 4721 } 4722 4723 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { 4724 desc = &virtnet_stats_rx_csum_desc_qstat[0]; 4725 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); 4726 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM) 4727 goto found; 4728 } 4729 4730 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_GSO) { 4731 desc = &virtnet_stats_rx_gso_desc_qstat[0]; 4732 num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); 4733 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO) 4734 goto found; 4735 } 4736 4737 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { 4738 desc = &virtnet_stats_rx_speed_desc_qstat[0]; 4739 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); 4740 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED) 4741 goto found; 4742 } 4743 4744 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { 4745 desc = &virtnet_stats_tx_basic_desc_qstat[0]; 4746 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); 4747 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC) 4748 goto found; 4749 } 4750 4751 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { 4752 desc = &virtnet_stats_tx_csum_desc_qstat[0]; 4753 num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); 4754 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM) 4755 goto found; 4756
} 4757 4758 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) { 4759 desc = &virtnet_stats_tx_gso_desc_qstat[0]; 4760 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); 4761 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO) 4762 goto found; 4763 } 4764 4765 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { 4766 desc = &virtnet_stats_tx_speed_desc_qstat[0]; 4767 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); 4768 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED) 4769 goto found; 4770 } 4771 4772 return; 4773 4774 found: 4775 for (i = 0; i < num; ++i) { 4776 offset = desc[i].qstat_offset / sizeof(*ctx->data); 4777 v = (const __le64 *)(base + desc[i].offset); 4778 ctx->data[offset] = le64_to_cpu(*v); 4779 } 4780 } 4781 4782 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S 4783 * The stats source is the device or the driver. 4784 * 4785 * @vi: virtio net info 4786 * @qid: the vq id 4787 * @ctx: stats ctx (initialized by virtnet_stats_ctx_init()) 4788 * @base: pointer to the device reply or the driver stats structure. 4789 * @drv_stats: designate the base type (device reply, driver stats) 4790 * @reply_type: the type of the device reply (if drv_stats is true, this must be zero) 4791 */ 4792 static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid, 4793 struct virtnet_stats_ctx *ctx, 4794 const u8 *base, bool drv_stats, u8 reply_type) 4795 { 4796 u32 queue_type, num_rx, num_tx, num_cq; 4797 const struct virtnet_stat_desc *desc; 4798 const u64_stats_t *v_stat; 4799 u64 offset, bitmap; 4800 const __le64 *v; 4801 int i, num; 4802 4803 if (ctx->to_qstat) 4804 return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type); 4805 4806 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; 4807 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; 4808 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; 4809 4810 queue_type = vq_type(vi, qid); 4811 bitmap = ctx->bitmap[queue_type]; 4812 4813 /* skip the total fields of pairs */ 4814 offset = num_rx + num_tx; 4815 4816 if (queue_type == VIRTNET_Q_TYPE_TX) { 4817 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2); 4818 4819 num = ARRAY_SIZE(virtnet_sq_stats_desc); 4820 if (drv_stats) { 4821 desc = &virtnet_sq_stats_desc[0]; 4822 goto drv_stats; 4823 } 4824 4825 offset += num; 4826 4827 } else if (queue_type == VIRTNET_Q_TYPE_RX) { 4828 offset += num_cq + num_rx * (qid / 2); 4829 4830 num = ARRAY_SIZE(virtnet_rq_stats_desc); 4831 if (drv_stats) { 4832 desc = &virtnet_rq_stats_desc[0]; 4833 goto drv_stats; 4834 } 4835 4836 offset += num; 4837 } 4838 4839 if (bitmap & VIRTIO_NET_STATS_TYPE_CVQ) { 4840 desc = &virtnet_stats_cvq_desc[0]; 4841 num = ARRAY_SIZE(virtnet_stats_cvq_desc); 4842 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_CVQ) 4843 goto found; 4844 4845 offset += num; 4846 } 4847 4848 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { 4849 desc = &virtnet_stats_rx_basic_desc[0]; 4850 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc); 4851 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC) 4852 goto found; 4853 4854 offset += num; 4855 } 4856 4857 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { 4858 desc = &virtnet_stats_rx_csum_desc[0]; 4859 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc); 4860 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM) 4861 goto found; 4862 4863 offset += num; 4864 } 4865 4866 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { 4867 desc = &virtnet_stats_rx_speed_desc[0]; 4868 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc); 4869 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED) 4870 goto found; 4871 4872
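/* Not the reply type we were asked for: skip past this group's slots in the output buffer. */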
offset += num; 4873 } 4874 4875 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { 4876 desc = &virtnet_stats_tx_basic_desc[0]; 4877 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc); 4878 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC) 4879 goto found; 4880 4881 offset += num; 4882 } 4883 4884 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) { 4885 desc = &virtnet_stats_tx_gso_desc[0]; 4886 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc); 4887 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO) 4888 goto found; 4889 4890 offset += num; 4891 } 4892 4893 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { 4894 desc = &virtnet_stats_tx_speed_desc[0]; 4895 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc); 4896 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED) 4897 goto found; 4898 4899 offset += num; 4900 } 4901 4902 return; 4903 4904 found: 4905 for (i = 0; i < num; ++i) { 4906 v = (const __le64 *)(base + desc[i].offset); 4907 ctx->data[offset + i] = le64_to_cpu(*v); 4908 } 4909 4910 return; 4911 4912 drv_stats: 4913 for (i = 0; i < num; ++i) { 4914 v_stat = (const u64_stats_t *)(base + desc[i].offset); 4915 ctx->data[offset + i] = u64_stats_read(v_stat); 4916 } 4917 } 4918 4919 static int __virtnet_get_hw_stats(struct virtnet_info *vi, 4920 struct virtnet_stats_ctx *ctx, 4921 struct virtio_net_ctrl_queue_stats *req, 4922 int req_size, void *reply, int res_size) 4923 { 4924 struct virtio_net_stats_reply_hdr *hdr; 4925 struct scatterlist sgs_in, sgs_out; 4926 void *p; 4927 u32 qid; 4928 int ok; 4929 4930 sg_init_one(&sgs_out, req, req_size); 4931 sg_init_one(&sgs_in, reply, res_size); 4932 4933 ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS, 4934 VIRTIO_NET_CTRL_STATS_GET, 4935 &sgs_out, &sgs_in); 4936 4937 if (!ok) 4938 return ok; 4939 4940 for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) { 4941 hdr = p; 4942 qid = le16_to_cpu(hdr->vq_index); 4943 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type); 4944 } 4945 4946 return 0; 4947 } 4948 4949 static void virtnet_make_stat_req(struct virtnet_info *vi, 4950 struct virtnet_stats_ctx *ctx, 4951 struct virtio_net_ctrl_queue_stats *req, 4952 int qid, int *idx) 4953 { 4954 int qtype = vq_type(vi, qid); 4955 u64 bitmap = ctx->bitmap[qtype]; 4956 4957 if (!bitmap) 4958 return; 4959 4960 req->stats[*idx].vq_index = cpu_to_le16(qid); 4961 req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap); 4962 *idx += 1; 4963 } 4964 4965 /* qid: -1: get the stats of all vqs. 4966 * >= 0: get the stats of the specified vq. This must not be the cvq.
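* When qid is -1 the cvq stats are requested as well (see enable_cvq below).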
4967 */ 4968 static int virtnet_get_hw_stats(struct virtnet_info *vi, 4969 struct virtnet_stats_ctx *ctx, int qid) 4970 { 4971 int qnum, i, j, res_size, qtype, last_vq, first_vq; 4972 struct virtio_net_ctrl_queue_stats *req; 4973 bool enable_cvq; 4974 void *reply; 4975 int ok; 4976 4977 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) 4978 return 0; 4979 4980 if (qid == -1) { 4981 last_vq = vi->curr_queue_pairs * 2 - 1; 4982 first_vq = 0; 4983 enable_cvq = true; 4984 } else { 4985 last_vq = qid; 4986 first_vq = qid; 4987 enable_cvq = false; 4988 } 4989 4990 qnum = 0; 4991 res_size = 0; 4992 for (i = first_vq; i <= last_vq ; ++i) { 4993 qtype = vq_type(vi, i); 4994 if (ctx->bitmap[qtype]) { 4995 ++qnum; 4996 res_size += ctx->size[qtype]; 4997 } 4998 } 4999 5000 if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) { 5001 res_size += ctx->size[VIRTNET_Q_TYPE_CQ]; 5002 qnum += 1; 5003 } 5004 5005 req = kcalloc(qnum, sizeof(*req), GFP_KERNEL); 5006 if (!req) 5007 return -ENOMEM; 5008 5009 reply = kmalloc(res_size, GFP_KERNEL); 5010 if (!reply) { 5011 kfree(req); 5012 return -ENOMEM; 5013 } 5014 5015 j = 0; 5016 for (i = first_vq; i <= last_vq ; ++i) 5017 virtnet_make_stat_req(vi, ctx, req, i, &j); 5018 5019 if (enable_cvq) 5020 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j); 5021 5022 ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size); 5023 5024 kfree(req); 5025 kfree(reply); 5026 5027 return ok; 5028 } 5029 5030 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) 5031 { 5032 struct virtnet_info *vi = netdev_priv(dev); 5033 unsigned int i; 5034 u8 *p = data; 5035 5036 switch (stringset) { 5037 case ETH_SS_STATS: 5038 /* Generate the total field names. */ 5039 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p); 5040 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p); 5041 5042 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p); 5043 5044 for (i = 0; i < vi->curr_queue_pairs; ++i) 5045 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p); 5046 5047 for (i = 0; i < vi->curr_queue_pairs; ++i) 5048 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p); 5049 break; 5050 } 5051 } 5052 5053 static int virtnet_get_sset_count(struct net_device *dev, int sset) 5054 { 5055 struct virtnet_info *vi = netdev_priv(dev); 5056 struct virtnet_stats_ctx ctx = {0}; 5057 u32 pair_count; 5058 5059 switch (sset) { 5060 case ETH_SS_STATS: 5061 virtnet_stats_ctx_init(vi, &ctx, NULL, false); 5062 5063 pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX]; 5064 5065 return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] + 5066 vi->curr_queue_pairs * pair_count; 5067 default: 5068 return -EOPNOTSUPP; 5069 } 5070 } 5071 5072 static void virtnet_get_ethtool_stats(struct net_device *dev, 5073 struct ethtool_stats *stats, u64 *data) 5074 { 5075 struct virtnet_info *vi = netdev_priv(dev); 5076 struct virtnet_stats_ctx ctx = {0}; 5077 unsigned int start, i; 5078 const u8 *stats_base; 5079 5080 virtnet_stats_ctx_init(vi, &ctx, data, false); 5081 if (virtnet_get_hw_stats(vi, &ctx, -1)) 5082 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n"); 5083 5084 for (i = 0; i < vi->curr_queue_pairs; i++) { 5085 struct receive_queue *rq = &vi->rq[i]; 5086 struct send_queue *sq = &vi->sq[i]; 5087 5088 stats_base = (const u8 *)&rq->stats; 5089 do { 5090 start = u64_stats_fetch_begin(&rq->stats.syncp); 5091 virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0); 5092 } while (u64_stats_fetch_retry(&rq->stats.syncp, 
start)); 5093 5094 stats_base = (const u8 *)&sq->stats; 5095 do { 5096 start = u64_stats_fetch_begin(&sq->stats.syncp); 5097 virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0); 5098 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); 5099 } 5100 5101 virtnet_fill_total_fields(vi, &ctx); 5102 } 5103 5104 static void virtnet_get_channels(struct net_device *dev, 5105 struct ethtool_channels *channels) 5106 { 5107 struct virtnet_info *vi = netdev_priv(dev); 5108 5109 channels->combined_count = vi->curr_queue_pairs; 5110 channels->max_combined = vi->max_queue_pairs; 5111 channels->max_other = 0; 5112 channels->rx_count = 0; 5113 channels->tx_count = 0; 5114 channels->other_count = 0; 5115 } 5116 5117 static int virtnet_set_link_ksettings(struct net_device *dev, 5118 const struct ethtool_link_ksettings *cmd) 5119 { 5120 struct virtnet_info *vi = netdev_priv(dev); 5121 5122 return ethtool_virtdev_set_link_ksettings(dev, cmd, 5123 &vi->speed, &vi->duplex); 5124 } 5125 5126 static int virtnet_get_link_ksettings(struct net_device *dev, 5127 struct ethtool_link_ksettings *cmd) 5128 { 5129 struct virtnet_info *vi = netdev_priv(dev); 5130 5131 cmd->base.speed = vi->speed; 5132 cmd->base.duplex = vi->duplex; 5133 cmd->base.port = PORT_OTHER; 5134 5135 return 0; 5136 } 5137 5138 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi, 5139 struct ethtool_coalesce *ec) 5140 { 5141 struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL; 5142 struct scatterlist sgs_tx; 5143 int i; 5144 5145 coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL); 5146 if (!coal_tx) 5147 return -ENOMEM; 5148 5149 coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); 5150 coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); 5151 sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx)); 5152 5153 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 5154 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, 5155 &sgs_tx)) 5156 return -EINVAL; 5157 5158 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; 5159 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; 5160 for (i = 0; i < vi->max_queue_pairs; i++) { 5161 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; 5162 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; 5163 } 5164 5165 return 0; 5166 } 5167 5168 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, 5169 struct ethtool_coalesce *ec) 5170 { 5171 struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL; 5172 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; 5173 struct scatterlist sgs_rx; 5174 int i; 5175 5176 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 5177 return -EOPNOTSUPP; 5178 5179 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || 5180 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) 5181 return -EINVAL; 5182 5183 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { 5184 vi->rx_dim_enabled = true; 5185 for (i = 0; i < vi->max_queue_pairs; i++) { 5186 mutex_lock(&vi->rq[i].dim_lock); 5187 vi->rq[i].dim_enabled = true; 5188 mutex_unlock(&vi->rq[i].dim_lock); 5189 } 5190 return 0; 5191 } 5192 5193 coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL); 5194 if (!coal_rx) 5195 return -ENOMEM; 5196 5197 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { 5198 vi->rx_dim_enabled = false; 5199 for (i = 0; i < vi->max_queue_pairs; i++) { 5200 mutex_lock(&vi->rq[i].dim_lock); 5201 vi->rq[i].dim_enabled = false; 5202 mutex_unlock(&vi->rq[i].dim_lock); 5203 } 5204 } 5205 5206 /* Since the per-queue 
coalescing params can be set, 5207 * we need to apply the global new params even if they 5208 * are not updated. 5209 */ 5210 coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); 5211 coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); 5212 sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx)); 5213 5214 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 5215 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, 5216 &sgs_rx)) 5217 return -EINVAL; 5218 5219 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; 5220 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; 5221 for (i = 0; i < vi->max_queue_pairs; i++) { 5222 mutex_lock(&vi->rq[i].dim_lock); 5223 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; 5224 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; 5225 mutex_unlock(&vi->rq[i].dim_lock); 5226 } 5227 5228 return 0; 5229 } 5230 5231 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, 5232 struct ethtool_coalesce *ec) 5233 { 5234 int err; 5235 5236 err = virtnet_send_tx_notf_coal_cmds(vi, ec); 5237 if (err) 5238 return err; 5239 5240 err = virtnet_send_rx_notf_coal_cmds(vi, ec); 5241 if (err) 5242 return err; 5243 5244 return 0; 5245 } 5246 5247 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, 5248 struct ethtool_coalesce *ec, 5249 u16 queue) 5250 { 5251 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; 5252 u32 max_usecs, max_packets; 5253 bool cur_rx_dim; 5254 int err; 5255 5256 mutex_lock(&vi->rq[queue].dim_lock); 5257 cur_rx_dim = vi->rq[queue].dim_enabled; 5258 max_usecs = vi->rq[queue].intr_coal.max_usecs; 5259 max_packets = vi->rq[queue].intr_coal.max_packets; 5260 5261 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || 5262 ec->rx_max_coalesced_frames != max_packets)) { 5263 mutex_unlock(&vi->rq[queue].dim_lock); 5264 return -EINVAL; 5265 } 5266 5267 if (rx_ctrl_dim_on && !cur_rx_dim) { 5268 vi->rq[queue].dim_enabled = true; 5269 mutex_unlock(&vi->rq[queue].dim_lock); 5270 return 0; 5271 } 5272 5273 if (!rx_ctrl_dim_on && cur_rx_dim) 5274 vi->rq[queue].dim_enabled = false; 5275 5276 /* If no params are updated, userspace ethtool will 5277 * reject the modification.
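* So the per-vq coalescing command below can be sent unconditionally.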
5278 */ 5279 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue, 5280 ec->rx_coalesce_usecs, 5281 ec->rx_max_coalesced_frames); 5282 mutex_unlock(&vi->rq[queue].dim_lock); 5283 return err; 5284 } 5285 5286 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, 5287 struct ethtool_coalesce *ec, 5288 u16 queue) 5289 { 5290 int err; 5291 5292 err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue); 5293 if (err) 5294 return err; 5295 5296 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue, 5297 ec->tx_coalesce_usecs, 5298 ec->tx_max_coalesced_frames); 5299 if (err) 5300 return err; 5301 5302 return 0; 5303 } 5304 5305 static void virtnet_rx_dim_work(struct work_struct *work) 5306 { 5307 struct dim *dim = container_of(work, struct dim, work); 5308 struct receive_queue *rq = container_of(dim, 5309 struct receive_queue, dim); 5310 struct virtnet_info *vi = rq->vq->vdev->priv; 5311 struct net_device *dev = vi->dev; 5312 struct dim_cq_moder update_moder; 5313 int qnum, err; 5314 5315 qnum = rq - vi->rq; 5316 5317 mutex_lock(&rq->dim_lock); 5318 if (!rq->dim_enabled) 5319 goto out; 5320 5321 update_moder = net_dim_get_rx_irq_moder(dev, dim); 5322 if (update_moder.usec != rq->intr_coal.max_usecs || 5323 update_moder.pkts != rq->intr_coal.max_packets) { 5324 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, 5325 update_moder.usec, 5326 update_moder.pkts); 5327 if (err) 5328 pr_debug("%s: Failed to send dim parameters on rxq%d\n", 5329 dev->name, qnum); 5330 } 5331 out: 5332 dim->state = DIM_START_MEASURE; 5333 mutex_unlock(&rq->dim_lock); 5334 } 5335 5336 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) 5337 { 5338 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL 5339 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated. 5340 */ 5341 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) 5342 return -EOPNOTSUPP; 5343 5344 if (ec->tx_max_coalesced_frames > 1 || 5345 ec->rx_max_coalesced_frames != 1) 5346 return -EINVAL; 5347 5348 return 0; 5349 } 5350 5351 static int virtnet_should_update_vq_weight(int dev_flags, int weight, 5352 int vq_weight, bool *should_update) 5353 { 5354 if (weight ^ vq_weight) { 5355 if (dev_flags & IFF_UP) 5356 return -EBUSY; 5357 *should_update = true; 5358 } 5359 5360 return 0; 5361 } 5362 5363 static int virtnet_set_coalesce(struct net_device *dev, 5364 struct ethtool_coalesce *ec, 5365 struct kernel_ethtool_coalesce *kernel_coal, 5366 struct netlink_ext_ack *extack) 5367 { 5368 struct virtnet_info *vi = netdev_priv(dev); 5369 int ret, queue_number, napi_weight, i; 5370 bool update_napi = false; 5371 5372 /* Can't change NAPI weight if the link is up */ 5373 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; 5374 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { 5375 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, 5376 vi->sq[queue_number].napi.weight, 5377 &update_napi); 5378 if (ret) 5379 return ret; 5380 5381 if (update_napi) { 5382 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be 5383 * updated for the sake of simplicity, which might not be necessary 5384 */ 5385 break; 5386 } 5387 } 5388 5389 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) 5390 ret = virtnet_send_notf_coal_cmds(vi, ec); 5391 else 5392 ret = virtnet_coal_params_supported(ec); 5393 5394 if (ret) 5395 return ret; 5396 5397 if (update_napi) { 5398 /* xsk xmit depends on the tx napi. So if xsk is active, 5399 * prevent modifications to tx napi. 
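* (sq->xsk_pool is assigned in virtnet_sq_bind_xsk_pool() while the tx queue is paused.)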
5400 */ 5401 for (i = queue_number; i < vi->max_queue_pairs; i++) { 5402 if (vi->sq[i].xsk_pool) 5403 return -EBUSY; 5404 } 5405 5406 for (; queue_number < vi->max_queue_pairs; queue_number++) 5407 vi->sq[queue_number].napi.weight = napi_weight; 5408 } 5409 5410 return ret; 5411 } 5412 5413 static int virtnet_get_coalesce(struct net_device *dev, 5414 struct ethtool_coalesce *ec, 5415 struct kernel_ethtool_coalesce *kernel_coal, 5416 struct netlink_ext_ack *extack) 5417 { 5418 struct virtnet_info *vi = netdev_priv(dev); 5419 5420 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { 5421 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; 5422 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; 5423 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; 5424 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; 5425 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; 5426 } else { 5427 ec->rx_max_coalesced_frames = 1; 5428 5429 if (vi->sq[0].napi.weight) 5430 ec->tx_max_coalesced_frames = 1; 5431 } 5432 5433 return 0; 5434 } 5435 5436 static int virtnet_set_per_queue_coalesce(struct net_device *dev, 5437 u32 queue, 5438 struct ethtool_coalesce *ec) 5439 { 5440 struct virtnet_info *vi = netdev_priv(dev); 5441 int ret, napi_weight; 5442 bool update_napi = false; 5443 5444 if (queue >= vi->max_queue_pairs) 5445 return -EINVAL; 5446 5447 /* Can't change NAPI weight if the link is up */ 5448 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; 5449 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, 5450 vi->sq[queue].napi.weight, 5451 &update_napi); 5452 if (ret) 5453 return ret; 5454 5455 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 5456 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue); 5457 else 5458 ret = virtnet_coal_params_supported(ec); 5459 5460 if (ret) 5461 return ret; 5462 5463 if (update_napi) 5464 vi->sq[queue].napi.weight = napi_weight; 5465 5466 return 0; 5467 } 5468 5469 static int virtnet_get_per_queue_coalesce(struct net_device *dev, 5470 u32 queue, 5471 struct ethtool_coalesce *ec) 5472 { 5473 struct virtnet_info *vi = netdev_priv(dev); 5474 5475 if (queue >= vi->max_queue_pairs) 5476 return -EINVAL; 5477 5478 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { 5479 mutex_lock(&vi->rq[queue].dim_lock); 5480 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; 5481 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; 5482 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; 5483 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; 5484 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; 5485 mutex_unlock(&vi->rq[queue].dim_lock); 5486 } else { 5487 ec->rx_max_coalesced_frames = 1; 5488 5489 if (vi->sq[queue].napi.weight) 5490 ec->tx_max_coalesced_frames = 1; 5491 } 5492 5493 return 0; 5494 } 5495 5496 static void virtnet_init_settings(struct net_device *dev) 5497 { 5498 struct virtnet_info *vi = netdev_priv(dev); 5499 5500 vi->speed = SPEED_UNKNOWN; 5501 vi->duplex = DUPLEX_UNKNOWN; 5502 } 5503 5504 static u32 virtnet_get_rxfh_key_size(struct net_device *dev) 5505 { 5506 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; 5507 } 5508 5509 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev) 5510 { 5511 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; 5512 } 5513 5514 static int virtnet_get_rxfh(struct net_device *dev, 5515 struct ethtool_rxfh_param *rxfh) 5516 { 5517 struct virtnet_info *vi = netdev_priv(dev); 5518 int 
i; 5519 5520 if (rxfh->indir) { 5521 for (i = 0; i < vi->rss_indir_table_size; ++i) 5522 rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]); 5523 } 5524 5525 if (rxfh->key) 5526 memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size); 5527 5528 rxfh->hfunc = ETH_RSS_HASH_TOP; 5529 5530 return 0; 5531 } 5532 5533 static int virtnet_set_rxfh(struct net_device *dev, 5534 struct ethtool_rxfh_param *rxfh, 5535 struct netlink_ext_ack *extack) 5536 { 5537 struct virtnet_info *vi = netdev_priv(dev); 5538 bool update = false; 5539 int i; 5540 5541 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 5542 rxfh->hfunc != ETH_RSS_HASH_TOP) 5543 return -EOPNOTSUPP; 5544 5545 if (rxfh->indir) { 5546 if (!vi->has_rss) 5547 return -EOPNOTSUPP; 5548 5549 for (i = 0; i < vi->rss_indir_table_size; ++i) 5550 vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]); 5551 update = true; 5552 } 5553 5554 if (rxfh->key) { 5555 /* If either _F_HASH_REPORT or _F_RSS are negotiated, the 5556 * device provides hash calculation capabilities, that is, 5557 * hash_key is configured. 5558 */ 5559 if (!vi->has_rss && !vi->has_rss_hash_report) 5560 return -EOPNOTSUPP; 5561 5562 memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size); 5563 update = true; 5564 } 5565 5566 if (update) 5567 virtnet_commit_rss_command(vi); 5568 5569 return 0; 5570 } 5571 5572 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) 5573 { 5574 struct virtnet_info *vi = netdev_priv(dev); 5575 int rc = 0; 5576 5577 switch (info->cmd) { 5578 case ETHTOOL_GRXRINGS: 5579 info->data = vi->curr_queue_pairs; 5580 break; 5581 default: 5582 rc = -EOPNOTSUPP; 5583 } 5584 5585 return rc; 5586 } 5587 5588 static const struct ethtool_ops virtnet_ethtool_ops = { 5589 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 5590 ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 5591 .get_drvinfo = virtnet_get_drvinfo, 5592 .get_link = ethtool_op_get_link, 5593 .get_ringparam = virtnet_get_ringparam, 5594 .set_ringparam = virtnet_set_ringparam, 5595 .get_strings = virtnet_get_strings, 5596 .get_sset_count = virtnet_get_sset_count, 5597 .get_ethtool_stats = virtnet_get_ethtool_stats, 5598 .set_channels = virtnet_set_channels, 5599 .get_channels = virtnet_get_channels, 5600 .get_ts_info = ethtool_op_get_ts_info, 5601 .get_link_ksettings = virtnet_get_link_ksettings, 5602 .set_link_ksettings = virtnet_set_link_ksettings, 5603 .set_coalesce = virtnet_set_coalesce, 5604 .get_coalesce = virtnet_get_coalesce, 5605 .set_per_queue_coalesce = virtnet_set_per_queue_coalesce, 5606 .get_per_queue_coalesce = virtnet_get_per_queue_coalesce, 5607 .get_rxfh_key_size = virtnet_get_rxfh_key_size, 5608 .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, 5609 .get_rxfh = virtnet_get_rxfh, 5610 .set_rxfh = virtnet_set_rxfh, 5611 .get_rxfh_fields = virtnet_get_hashflow, 5612 .set_rxfh_fields = virtnet_set_hashflow, 5613 .get_rxnfc = virtnet_get_rxnfc, 5614 }; 5615 5616 static void virtnet_get_queue_stats_rx(struct net_device *dev, int i, 5617 struct netdev_queue_stats_rx *stats) 5618 { 5619 struct virtnet_info *vi = netdev_priv(dev); 5620 struct receive_queue *rq = &vi->rq[i]; 5621 struct virtnet_stats_ctx ctx = {0}; 5622 5623 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true); 5624 5625 virtnet_get_hw_stats(vi, &ctx, i * 2); 5626 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0); 5627 } 5628 5629 static void virtnet_get_queue_stats_tx(struct net_device *dev, int i, 5630 struct 
netdev_queue_stats_tx *stats) 5631 { 5632 struct virtnet_info *vi = netdev_priv(dev); 5633 struct send_queue *sq = &vi->sq[i]; 5634 struct virtnet_stats_ctx ctx = {0}; 5635 5636 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true); 5637 5638 virtnet_get_hw_stats(vi, &ctx, i * 2 + 1); 5639 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0); 5640 } 5641 5642 static void virtnet_get_base_stats(struct net_device *dev, 5643 struct netdev_queue_stats_rx *rx, 5644 struct netdev_queue_stats_tx *tx) 5645 { 5646 struct virtnet_info *vi = netdev_priv(dev); 5647 5648 /* The queue stats of the virtio-net will not be reset. So here we 5649 * return 0. 5650 */ 5651 rx->bytes = 0; 5652 rx->packets = 0; 5653 5654 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { 5655 rx->hw_drops = 0; 5656 rx->hw_drop_overruns = 0; 5657 } 5658 5659 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { 5660 rx->csum_unnecessary = 0; 5661 rx->csum_none = 0; 5662 rx->csum_bad = 0; 5663 } 5664 5665 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { 5666 rx->hw_gro_packets = 0; 5667 rx->hw_gro_bytes = 0; 5668 rx->hw_gro_wire_packets = 0; 5669 rx->hw_gro_wire_bytes = 0; 5670 } 5671 5672 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) 5673 rx->hw_drop_ratelimits = 0; 5674 5675 tx->bytes = 0; 5676 tx->packets = 0; 5677 tx->stop = 0; 5678 tx->wake = 0; 5679 5680 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { 5681 tx->hw_drops = 0; 5682 tx->hw_drop_errors = 0; 5683 } 5684 5685 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { 5686 tx->csum_none = 0; 5687 tx->needs_csum = 0; 5688 } 5689 5690 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { 5691 tx->hw_gso_packets = 0; 5692 tx->hw_gso_bytes = 0; 5693 tx->hw_gso_wire_packets = 0; 5694 tx->hw_gso_wire_bytes = 0; 5695 } 5696 5697 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) 5698 tx->hw_drop_ratelimits = 0; 5699 5700 netdev_stat_queue_sum(dev, 5701 dev->real_num_rx_queues, vi->max_queue_pairs, rx, 5702 dev->real_num_tx_queues, vi->max_queue_pairs, tx); 5703 } 5704 5705 static const struct netdev_stat_ops virtnet_stat_ops = { 5706 .get_queue_stats_rx = virtnet_get_queue_stats_rx, 5707 .get_queue_stats_tx = virtnet_get_queue_stats_tx, 5708 .get_base_stats = virtnet_get_base_stats, 5709 }; 5710 5711 static void virtnet_freeze_down(struct virtio_device *vdev) 5712 { 5713 struct virtnet_info *vi = vdev->priv; 5714 5715 /* Make sure no work handler is accessing the device */ 5716 flush_work(&vi->config_work); 5717 disable_rx_mode_work(vi); 5718 flush_work(&vi->rx_mode_work); 5719 5720 netif_tx_lock_bh(vi->dev); 5721 netif_device_detach(vi->dev); 5722 netif_tx_unlock_bh(vi->dev); 5723 if (netif_running(vi->dev)) { 5724 rtnl_lock(); 5725 virtnet_close(vi->dev); 5726 rtnl_unlock(); 5727 } 5728 } 5729 5730 static int init_vqs(struct virtnet_info *vi); 5731 5732 static int virtnet_restore_up(struct virtio_device *vdev) 5733 { 5734 struct virtnet_info *vi = vdev->priv; 5735 int err; 5736 5737 err = init_vqs(vi); 5738 if (err) 5739 return err; 5740 5741 virtio_device_ready(vdev); 5742 5743 enable_delayed_refill(vi); 5744 enable_rx_mode_work(vi); 5745 5746 if (netif_running(vi->dev)) { 5747 rtnl_lock(); 5748 err = virtnet_open(vi->dev); 5749 rtnl_unlock(); 5750 if (err) 5751 return err; 5752 } 5753 5754 netif_tx_lock_bh(vi->dev); 5755 netif_device_attach(vi->dev); 5756 netif_tx_unlock_bh(vi->dev); 5757 return err; 5758 } 5759 5760 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 
offloads) 5761 { 5762 __virtio64 *_offloads __free(kfree) = NULL; 5763 struct scatterlist sg; 5764 5765 _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL); 5766 if (!_offloads) 5767 return -ENOMEM; 5768 5769 *_offloads = cpu_to_virtio64(vi->vdev, offloads); 5770 5771 sg_init_one(&sg, _offloads, sizeof(*_offloads)); 5772 5773 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, 5774 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { 5775 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n"); 5776 return -EINVAL; 5777 } 5778 5779 return 0; 5780 } 5781 5782 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) 5783 { 5784 u64 offloads = 0; 5785 5786 if (!vi->guest_offloads) 5787 return 0; 5788 5789 return virtnet_set_guest_offloads(vi, offloads); 5790 } 5791 5792 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) 5793 { 5794 u64 offloads = vi->guest_offloads; 5795 5796 if (!vi->guest_offloads) 5797 return 0; 5798 5799 return virtnet_set_guest_offloads(vi, offloads); 5800 } 5801 5802 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq, 5803 struct xsk_buff_pool *pool) 5804 { 5805 int err, qindex; 5806 5807 qindex = rq - vi->rq; 5808 5809 if (pool) { 5810 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id); 5811 if (err < 0) 5812 return err; 5813 5814 err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info, 5815 MEM_TYPE_XSK_BUFF_POOL, NULL); 5816 if (err < 0) 5817 goto unreg; 5818 5819 xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info); 5820 } 5821 5822 virtnet_rx_pause(vi, rq); 5823 5824 err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL); 5825 if (err) { 5826 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); 5827 5828 pool = NULL; 5829 } 5830 5831 rq->xsk_pool = pool; 5832 5833 virtnet_rx_resume(vi, rq); 5834 5835 if (pool) 5836 return 0; 5837 5838 unreg: 5839 xdp_rxq_info_unreg(&rq->xsk_rxq_info); 5840 return err; 5841 } 5842 5843 static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi, 5844 struct send_queue *sq, 5845 struct xsk_buff_pool *pool) 5846 { 5847 int err, qindex; 5848 5849 qindex = sq - vi->sq; 5850 5851 virtnet_tx_pause(vi, sq); 5852 5853 err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf, 5854 virtnet_sq_free_unused_buf_done); 5855 if (err) { 5856 netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err); 5857 pool = NULL; 5858 } 5859 5860 sq->xsk_pool = pool; 5861 5862 virtnet_tx_resume(vi, sq); 5863 5864 return err; 5865 } 5866 5867 static int virtnet_xsk_pool_enable(struct net_device *dev, 5868 struct xsk_buff_pool *pool, 5869 u16 qid) 5870 { 5871 struct virtnet_info *vi = netdev_priv(dev); 5872 struct receive_queue *rq; 5873 struct device *dma_dev; 5874 struct send_queue *sq; 5875 dma_addr_t hdr_dma; 5876 int err, size; 5877 5878 if (vi->hdr_len > xsk_pool_get_headroom(pool)) 5879 return -EINVAL; 5880 5881 /* In big_packets mode, xdp cannot work, so there is no need to 5882 * initialize the xsk for the rq. 5883 */ 5884 if (vi->big_packets && !vi->mergeable_rx_bufs) 5885 return -ENOENT; 5886 5887 if (qid >= vi->curr_queue_pairs) 5888 return -EINVAL; 5889 5890 sq = &vi->sq[qid]; 5891 rq = &vi->rq[qid]; 5892 5893 /* xsk assumes that tx and rx share the same dma device: af-xdp 5894 * may receive into a buffer from the rx ring and reuse that buffer to 5895 * transmit on the tx ring. So the dma dev of sq and rq must be the same one. 5896 * 5897 * But vq->dma_dev allows each vq to have its own dma dev. So we
So I 5898 * check the dma dev of vq and sq is the same dev. 5899 */ 5900 if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq)) 5901 return -EINVAL; 5902 5903 dma_dev = virtqueue_dma_dev(rq->vq); 5904 if (!dma_dev) 5905 return -EINVAL; 5906 5907 size = virtqueue_get_vring_size(rq->vq); 5908 5909 rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL); 5910 if (!rq->xsk_buffs) 5911 return -ENOMEM; 5912 5913 hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len, 5914 DMA_TO_DEVICE, 0); 5915 if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) { 5916 err = -ENOMEM; 5917 goto err_free_buffs; 5918 } 5919 5920 err = xsk_pool_dma_map(pool, dma_dev, 0); 5921 if (err) 5922 goto err_xsk_map; 5923 5924 err = virtnet_rq_bind_xsk_pool(vi, rq, pool); 5925 if (err) 5926 goto err_rq; 5927 5928 err = virtnet_sq_bind_xsk_pool(vi, sq, pool); 5929 if (err) 5930 goto err_sq; 5931 5932 /* Now, we do not support tx offload(such as tx csum), so all the tx 5933 * virtnet hdr is zero. So all the tx packets can share a single hdr. 5934 */ 5935 sq->xsk_hdr_dma_addr = hdr_dma; 5936 5937 return 0; 5938 5939 err_sq: 5940 virtnet_rq_bind_xsk_pool(vi, rq, NULL); 5941 err_rq: 5942 xsk_pool_dma_unmap(pool, 0); 5943 err_xsk_map: 5944 virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len, 5945 DMA_TO_DEVICE, 0); 5946 err_free_buffs: 5947 kvfree(rq->xsk_buffs); 5948 return err; 5949 } 5950 5951 static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid) 5952 { 5953 struct virtnet_info *vi = netdev_priv(dev); 5954 struct xsk_buff_pool *pool; 5955 struct receive_queue *rq; 5956 struct send_queue *sq; 5957 int err; 5958 5959 if (qid >= vi->curr_queue_pairs) 5960 return -EINVAL; 5961 5962 sq = &vi->sq[qid]; 5963 rq = &vi->rq[qid]; 5964 5965 pool = rq->xsk_pool; 5966 5967 err = virtnet_rq_bind_xsk_pool(vi, rq, NULL); 5968 err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL); 5969 5970 xsk_pool_dma_unmap(pool, 0); 5971 5972 virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr, 5973 vi->hdr_len, DMA_TO_DEVICE, 0); 5974 kvfree(rq->xsk_buffs); 5975 5976 return err; 5977 } 5978 5979 static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp) 5980 { 5981 if (xdp->xsk.pool) 5982 return virtnet_xsk_pool_enable(dev, xdp->xsk.pool, 5983 xdp->xsk.queue_id); 5984 else 5985 return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id); 5986 } 5987 5988 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, 5989 struct netlink_ext_ack *extack) 5990 { 5991 unsigned int room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM + 5992 sizeof(struct skb_shared_info)); 5993 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; 5994 struct virtnet_info *vi = netdev_priv(dev); 5995 struct bpf_prog *old_prog; 5996 u16 xdp_qp = 0, curr_qp; 5997 int i, err; 5998 5999 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) 6000 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 6001 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 6002 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 6003 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || 6004 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || 6005 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || 6006 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { 6007 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); 6008 return -EOPNOTSUPP; 6009 } 6010 6011 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { 6012 
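		/* Without any_header_sg (VIRTIO_F_ANY_LAYOUT or
		 * VIRTIO_F_VERSION_1) the device may expect the virtio
		 * header in a buffer of its own, while XDP needs the header
		 * and the data to share a single page.
		 */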
NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); 6013 return -EINVAL; 6014 } 6015 6016 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { 6017 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags"); 6018 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); 6019 return -EINVAL; 6020 } 6021 6022 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; 6023 if (prog) 6024 xdp_qp = nr_cpu_ids; 6025 6026 /* XDP requires extra queues for XDP_TX */ 6027 if (curr_qp + xdp_qp > vi->max_queue_pairs) { 6028 netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", 6029 curr_qp + xdp_qp, vi->max_queue_pairs); 6030 xdp_qp = 0; 6031 } 6032 6033 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); 6034 if (!prog && !old_prog) 6035 return 0; 6036 6037 if (prog) 6038 bpf_prog_add(prog, vi->max_queue_pairs - 1); 6039 6040 virtnet_rx_pause_all(vi); 6041 6042 /* Make sure NAPI is not using any XDP TX queues for RX. */ 6043 if (netif_running(dev)) { 6044 for (i = 0; i < vi->max_queue_pairs; i++) 6045 virtnet_napi_tx_disable(&vi->sq[i]); 6046 } 6047 6048 if (!prog) { 6049 for (i = 0; i < vi->max_queue_pairs; i++) { 6050 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 6051 if (i == 0) 6052 virtnet_restore_guest_offloads(vi); 6053 } 6054 synchronize_net(); 6055 } 6056 6057 err = virtnet_set_queues(vi, curr_qp + xdp_qp); 6058 if (err) 6059 goto err; 6060 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); 6061 vi->xdp_queue_pairs = xdp_qp; 6062 6063 if (prog) { 6064 vi->xdp_enabled = true; 6065 for (i = 0; i < vi->max_queue_pairs; i++) { 6066 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 6067 if (i == 0 && !old_prog) 6068 virtnet_clear_guest_offloads(vi); 6069 } 6070 if (!old_prog) 6071 xdp_features_set_redirect_target(dev, true); 6072 } else { 6073 xdp_features_clear_redirect_target(dev); 6074 vi->xdp_enabled = false; 6075 } 6076 6077 virtnet_rx_resume_all(vi); 6078 for (i = 0; i < vi->max_queue_pairs; i++) { 6079 if (old_prog) 6080 bpf_prog_put(old_prog); 6081 if (netif_running(dev)) 6082 virtnet_napi_tx_enable(&vi->sq[i]); 6083 } 6084 6085 return 0; 6086 6087 err: 6088 if (!prog) { 6089 virtnet_clear_guest_offloads(vi); 6090 for (i = 0; i < vi->max_queue_pairs; i++) 6091 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); 6092 } 6093 6094 virtnet_rx_resume_all(vi); 6095 if (netif_running(dev)) { 6096 for (i = 0; i < vi->max_queue_pairs; i++) 6097 virtnet_napi_tx_enable(&vi->sq[i]); 6098 } 6099 if (prog) 6100 bpf_prog_sub(prog, vi->max_queue_pairs - 1); 6101 return err; 6102 } 6103 6104 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) 6105 { 6106 switch (xdp->command) { 6107 case XDP_SETUP_PROG: 6108 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); 6109 case XDP_SETUP_XSK_POOL: 6110 return virtnet_xsk_pool_setup(dev, xdp); 6111 default: 6112 return -EINVAL; 6113 } 6114 } 6115 6116 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, 6117 size_t len) 6118 { 6119 struct virtnet_info *vi = netdev_priv(dev); 6120 int ret; 6121 6122 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) 6123 return -EOPNOTSUPP; 6124 6125 ret = snprintf(buf, len, "sby"); 6126 if (ret >= len) 6127 return -EOPNOTSUPP; 6128 6129 return 0; 6130 } 6131 6132 static int virtnet_set_features(struct net_device *dev, 6133 netdev_features_t features) 6134 { 6135 struct virtnet_info *vi = netdev_priv(dev); 6136 u64 offloads; 6137 int err; 6138 
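
	/* A sketch of the GRO_HW mapping below (illustrative, assuming a
	 * device whose capable set is GUEST_TSO4 | GUEST_TSO6 | GUEST_CSUM):
	 *
	 *   GRO_HW on:  offloads = vi->guest_offloads_capable
	 *   GRO_HW off: offloads = vi->guest_offloads_capable &
	 *                          ~GUEST_OFFLOAD_GRO_HW_MASK
	 *               (the TSO bits drop out, GUEST_CSUM remains)
	 */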
6139 if ((dev->features ^ features) & NETIF_F_GRO_HW) { 6140 if (vi->xdp_enabled) 6141 return -EBUSY; 6142 6143 if (features & NETIF_F_GRO_HW) 6144 offloads = vi->guest_offloads_capable; 6145 else 6146 offloads = vi->guest_offloads_capable & 6147 ~GUEST_OFFLOAD_GRO_HW_MASK; 6148 6149 err = virtnet_set_guest_offloads(vi, offloads); 6150 if (err) 6151 return err; 6152 vi->guest_offloads = offloads; 6153 } 6154 6155 if ((dev->features ^ features) & NETIF_F_RXHASH) { 6156 if (features & NETIF_F_RXHASH) 6157 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); 6158 else 6159 vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE); 6160 6161 if (!virtnet_commit_rss_command(vi)) 6162 return -EINVAL; 6163 } 6164 6165 return 0; 6166 } 6167 6168 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) 6169 { 6170 struct virtnet_info *priv = netdev_priv(dev); 6171 struct send_queue *sq = &priv->sq[txqueue]; 6172 struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue); 6173 6174 u64_stats_update_begin(&sq->stats.syncp); 6175 u64_stats_inc(&sq->stats.tx_timeouts); 6176 u64_stats_update_end(&sq->stats.syncp); 6177 6178 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", 6179 txqueue, sq->name, sq->vq->index, sq->vq->name, 6180 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); 6181 } 6182 6183 static int virtnet_init_irq_moder(struct virtnet_info *vi) 6184 { 6185 u8 profile_flags = 0, coal_flags = 0; 6186 int ret, i; 6187 6188 profile_flags |= DIM_PROFILE_RX; 6189 coal_flags |= DIM_COALESCE_USEC | DIM_COALESCE_PKTS; 6190 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags, 6191 DIM_CQ_PERIOD_MODE_START_FROM_EQE, 6192 0, virtnet_rx_dim_work, NULL); 6193 6194 if (ret) 6195 return ret; 6196 6197 for (i = 0; i < vi->max_queue_pairs; i++) 6198 net_dim_setting(vi->dev, &vi->rq[i].dim, false); 6199 6200 return 0; 6201 } 6202 6203 static void virtnet_free_irq_moder(struct virtnet_info *vi) 6204 { 6205 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 6206 return; 6207 6208 rtnl_lock(); 6209 net_dim_free_irq_moder(vi->dev); 6210 rtnl_unlock(); 6211 } 6212 6213 static const struct net_device_ops virtnet_netdev = { 6214 .ndo_open = virtnet_open, 6215 .ndo_stop = virtnet_close, 6216 .ndo_start_xmit = start_xmit, 6217 .ndo_validate_addr = eth_validate_addr, 6218 .ndo_set_mac_address = virtnet_set_mac_address, 6219 .ndo_set_rx_mode = virtnet_set_rx_mode, 6220 .ndo_get_stats64 = virtnet_stats, 6221 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 6222 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 6223 .ndo_bpf = virtnet_xdp, 6224 .ndo_xdp_xmit = virtnet_xdp_xmit, 6225 .ndo_xsk_wakeup = virtnet_xsk_wakeup, 6226 .ndo_features_check = passthru_features_check, 6227 .ndo_get_phys_port_name = virtnet_get_phys_port_name, 6228 .ndo_set_features = virtnet_set_features, 6229 .ndo_tx_timeout = virtnet_tx_timeout, 6230 }; 6231 6232 static void virtnet_config_changed_work(struct work_struct *work) 6233 { 6234 struct virtnet_info *vi = 6235 container_of(work, struct virtnet_info, config_work); 6236 u16 v; 6237 6238 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, 6239 struct virtio_net_config, status, &v) < 0) 6240 return; 6241 6242 if (v & VIRTIO_NET_S_ANNOUNCE) { 6243 netdev_notify_peers(vi->dev); 6244 virtnet_ack_link_announce(vi); 6245 } 6246 6247 /* Ignore unknown (future) status bits */ 6248 v &= VIRTIO_NET_S_LINK_UP; 6249 6250 if (vi->status == v) 6251 return; 6252 6253 vi->status = v; 6254 6255 if 
(vi->status & VIRTIO_NET_S_LINK_UP) { 6256 virtnet_update_settings(vi); 6257 netif_carrier_on(vi->dev); 6258 netif_tx_wake_all_queues(vi->dev); 6259 } else { 6260 netif_carrier_off(vi->dev); 6261 netif_tx_stop_all_queues(vi->dev); 6262 } 6263 } 6264 6265 static void virtnet_config_changed(struct virtio_device *vdev) 6266 { 6267 struct virtnet_info *vi = vdev->priv; 6268 6269 schedule_work(&vi->config_work); 6270 } 6271 6272 static void virtnet_free_queues(struct virtnet_info *vi) 6273 { 6274 int i; 6275 6276 for (i = 0; i < vi->max_queue_pairs; i++) { 6277 __netif_napi_del(&vi->rq[i].napi); 6278 __netif_napi_del(&vi->sq[i].napi); 6279 } 6280 6281 /* We called __netif_napi_del(), 6282 * we need to respect an RCU grace period before freeing vi->rq 6283 */ 6284 synchronize_net(); 6285 6286 kfree(vi->rq); 6287 kfree(vi->sq); 6288 kfree(vi->ctrl); 6289 } 6290 6291 static void _free_receive_bufs(struct virtnet_info *vi) 6292 { 6293 struct bpf_prog *old_prog; 6294 int i; 6295 6296 for (i = 0; i < vi->max_queue_pairs; i++) { 6297 while (vi->rq[i].pages) 6298 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); 6299 6300 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 6301 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); 6302 if (old_prog) 6303 bpf_prog_put(old_prog); 6304 } 6305 } 6306 6307 static void free_receive_bufs(struct virtnet_info *vi) 6308 { 6309 rtnl_lock(); 6310 _free_receive_bufs(vi); 6311 rtnl_unlock(); 6312 } 6313 6314 static void free_receive_page_frags(struct virtnet_info *vi) 6315 { 6316 int i; 6317 for (i = 0; i < vi->max_queue_pairs; i++) 6318 if (vi->rq[i].alloc_frag.page) { 6319 if (vi->rq[i].last_dma) 6320 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); 6321 put_page(vi->rq[i].alloc_frag.page); 6322 } 6323 } 6324 6325 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) 6326 { 6327 struct virtnet_info *vi = vq->vdev->priv; 6328 struct send_queue *sq; 6329 int i = vq2txq(vq); 6330 6331 sq = &vi->sq[i]; 6332 6333 switch (virtnet_xmit_ptr_unpack(&buf)) { 6334 case VIRTNET_XMIT_TYPE_SKB: 6335 case VIRTNET_XMIT_TYPE_SKB_ORPHAN: 6336 dev_kfree_skb(buf); 6337 break; 6338 6339 case VIRTNET_XMIT_TYPE_XDP: 6340 xdp_return_frame(buf); 6341 break; 6342 6343 case VIRTNET_XMIT_TYPE_XSK: 6344 xsk_tx_completed(sq->xsk_pool, 1); 6345 break; 6346 } 6347 } 6348 6349 static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq) 6350 { 6351 struct virtnet_info *vi = vq->vdev->priv; 6352 int i = vq2txq(vq); 6353 6354 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); 6355 } 6356 6357 static void free_unused_bufs(struct virtnet_info *vi) 6358 { 6359 void *buf; 6360 int i; 6361 6362 for (i = 0; i < vi->max_queue_pairs; i++) { 6363 struct virtqueue *vq = vi->sq[i].vq; 6364 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) 6365 virtnet_sq_free_unused_buf(vq, buf); 6366 cond_resched(); 6367 } 6368 6369 for (i = 0; i < vi->max_queue_pairs; i++) { 6370 struct virtqueue *vq = vi->rq[i].vq; 6371 6372 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) 6373 virtnet_rq_unmap_free_buf(vq, buf); 6374 cond_resched(); 6375 } 6376 } 6377 6378 static void virtnet_del_vqs(struct virtnet_info *vi) 6379 { 6380 struct virtio_device *vdev = vi->vdev; 6381 6382 virtnet_clean_affinity(vi); 6383 6384 vdev->config->del_vqs(vdev); 6385 6386 virtnet_free_queues(vi); 6387 } 6388 6389 /* How large should a single buffer be so a queue full of these can fit at 6390 * least one full packet? 6391 * Logic below assumes the mergeable buffer header is used. 
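 *
 * Illustrative arithmetic (assuming a 12-byte mergeable header): on a
 * 256-entry ring with a 1500-byte MTU, buf_len = 12 + 14 + 4 + 1500 = 1530
 * and DIV_ROUND_UP(1530, 256) = 6, so the GOOD_PACKET_LEN floor wins; with
 * big_packets (IP_MAX_MTU = 65535) on a 16-entry ring, the per-buffer
 * minimum grows to DIV_ROUND_UP(65565, 16) - hdr_len = 4086 bytes.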
6392 */ 6393 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) 6394 { 6395 const unsigned int hdr_len = vi->hdr_len; 6396 unsigned int rq_size = virtqueue_get_vring_size(vq); 6397 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; 6398 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 6399 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 6400 6401 return max(max(min_buf_len, hdr_len) - hdr_len, 6402 (unsigned int)GOOD_PACKET_LEN); 6403 } 6404 6405 static int virtnet_find_vqs(struct virtnet_info *vi) 6406 { 6407 struct virtqueue_info *vqs_info; 6408 struct virtqueue **vqs; 6409 int ret = -ENOMEM; 6410 int total_vqs; 6411 bool *ctx; 6412 u16 i; 6413 6414 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by 6415 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by 6416 * possible control vq. 6417 */ 6418 total_vqs = vi->max_queue_pairs * 2 + 6419 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); 6420 6421 /* Allocate space for find_vqs parameters */ 6422 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); 6423 if (!vqs) 6424 goto err_vq; 6425 vqs_info = kcalloc(total_vqs, sizeof(*vqs_info), GFP_KERNEL); 6426 if (!vqs_info) 6427 goto err_vqs_info; 6428 if (!vi->big_packets || vi->mergeable_rx_bufs) { 6429 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); 6430 if (!ctx) 6431 goto err_ctx; 6432 } else { 6433 ctx = NULL; 6434 } 6435 6436 /* Parameters for control virtqueue, if any */ 6437 if (vi->has_cvq) { 6438 vqs_info[total_vqs - 1].name = "control"; 6439 } 6440 6441 /* Allocate/initialize parameters for send/receive virtqueues */ 6442 for (i = 0; i < vi->max_queue_pairs; i++) { 6443 vqs_info[rxq2vq(i)].callback = skb_recv_done; 6444 vqs_info[txq2vq(i)].callback = skb_xmit_done; 6445 sprintf(vi->rq[i].name, "input.%u", i); 6446 sprintf(vi->sq[i].name, "output.%u", i); 6447 vqs_info[rxq2vq(i)].name = vi->rq[i].name; 6448 vqs_info[txq2vq(i)].name = vi->sq[i].name; 6449 if (ctx) 6450 vqs_info[rxq2vq(i)].ctx = true; 6451 } 6452 6453 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL); 6454 if (ret) 6455 goto err_find; 6456 6457 if (vi->has_cvq) { 6458 vi->cvq = vqs[total_vqs - 1]; 6459 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 6460 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6461 } 6462 6463 for (i = 0; i < vi->max_queue_pairs; i++) { 6464 vi->rq[i].vq = vqs[rxq2vq(i)]; 6465 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); 6466 vi->sq[i].vq = vqs[txq2vq(i)]; 6467 } 6468 6469 /* run here: ret == 0. 
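	 * On success we deliberately fall through: the labels below only
	 * free the temporary find_vqs parameter arrays, so they double as
	 * the success path and return ret == 0.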
*/ 6470 6471 6472 err_find: 6473 kfree(ctx); 6474 err_ctx: 6475 kfree(vqs_info); 6476 err_vqs_info: 6477 kfree(vqs); 6478 err_vq: 6479 return ret; 6480 } 6481 6482 static int virtnet_alloc_queues(struct virtnet_info *vi) 6483 { 6484 int i; 6485 6486 if (vi->has_cvq) { 6487 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); 6488 if (!vi->ctrl) 6489 goto err_ctrl; 6490 } else { 6491 vi->ctrl = NULL; 6492 } 6493 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); 6494 if (!vi->sq) 6495 goto err_sq; 6496 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); 6497 if (!vi->rq) 6498 goto err_rq; 6499 6500 INIT_DELAYED_WORK(&vi->refill, refill_work); 6501 for (i = 0; i < vi->max_queue_pairs; i++) { 6502 vi->rq[i].pages = NULL; 6503 netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll, 6504 i); 6505 vi->rq[i].napi.weight = napi_weight; 6506 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, 6507 virtnet_poll_tx, 6508 napi_tx ? napi_weight : 0); 6509 6510 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); 6511 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); 6512 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); 6513 6514 u64_stats_init(&vi->rq[i].stats.syncp); 6515 u64_stats_init(&vi->sq[i].stats.syncp); 6516 mutex_init(&vi->rq[i].dim_lock); 6517 } 6518 6519 return 0; 6520 6521 err_rq: 6522 kfree(vi->sq); 6523 err_sq: 6524 kfree(vi->ctrl); 6525 err_ctrl: 6526 return -ENOMEM; 6527 } 6528 6529 static int init_vqs(struct virtnet_info *vi) 6530 { 6531 int ret; 6532 6533 /* Allocate send & receive queues */ 6534 ret = virtnet_alloc_queues(vi); 6535 if (ret) 6536 goto err; 6537 6538 ret = virtnet_find_vqs(vi); 6539 if (ret) 6540 goto err_free; 6541 6542 cpus_read_lock(); 6543 virtnet_set_affinity(vi); 6544 cpus_read_unlock(); 6545 6546 return 0; 6547 6548 err_free: 6549 virtnet_free_queues(vi); 6550 err: 6551 return ret; 6552 } 6553 6554 #ifdef CONFIG_SYSFS 6555 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, 6556 char *buf) 6557 { 6558 struct virtnet_info *vi = netdev_priv(queue->dev); 6559 unsigned int queue_index = get_netdev_rx_queue_index(queue); 6560 unsigned int headroom = virtnet_get_headroom(vi); 6561 unsigned int tailroom = headroom ? 
sizeof(struct skb_shared_info) : 0; 6562 struct ewma_pkt_len *avg; 6563 6564 BUG_ON(queue_index >= vi->max_queue_pairs); 6565 avg = &vi->rq[queue_index].mrg_avg_pkt_len; 6566 return sprintf(buf, "%u\n", 6567 get_mergeable_buf_len(&vi->rq[queue_index], avg, 6568 SKB_DATA_ALIGN(headroom + tailroom))); 6569 } 6570 6571 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = 6572 __ATTR_RO(mergeable_rx_buffer_size); 6573 6574 static struct attribute *virtio_net_mrg_rx_attrs[] = { 6575 &mergeable_rx_buffer_size_attribute.attr, 6576 NULL 6577 }; 6578 6579 static const struct attribute_group virtio_net_mrg_rx_group = { 6580 .name = "virtio_net", 6581 .attrs = virtio_net_mrg_rx_attrs 6582 }; 6583 #endif 6584 6585 static bool virtnet_fail_on_feature(struct virtio_device *vdev, 6586 unsigned int fbit, 6587 const char *fname, const char *dname) 6588 { 6589 if (!virtio_has_feature(vdev, fbit)) 6590 return false; 6591 6592 dev_err(&vdev->dev, "device advertises feature %s but not %s", 6593 fname, dname); 6594 6595 return true; 6596 } 6597 6598 #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ 6599 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) 6600 6601 static bool virtnet_validate_features(struct virtio_device *vdev) 6602 { 6603 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && 6604 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, 6605 "VIRTIO_NET_F_CTRL_VQ") || 6606 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, 6607 "VIRTIO_NET_F_CTRL_VQ") || 6608 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, 6609 "VIRTIO_NET_F_CTRL_VQ") || 6610 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || 6611 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, 6612 "VIRTIO_NET_F_CTRL_VQ") || 6613 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS, 6614 "VIRTIO_NET_F_CTRL_VQ") || 6615 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT, 6616 "VIRTIO_NET_F_CTRL_VQ") || 6617 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL, 6618 "VIRTIO_NET_F_CTRL_VQ") || 6619 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL, 6620 "VIRTIO_NET_F_CTRL_VQ"))) { 6621 return false; 6622 } 6623 6624 return true; 6625 } 6626 6627 #define MIN_MTU ETH_MIN_MTU 6628 #define MAX_MTU ETH_MAX_MTU 6629 6630 static int virtnet_validate(struct virtio_device *vdev) 6631 { 6632 if (!vdev->config->get) { 6633 dev_err(&vdev->dev, "%s failure: config access disabled\n", 6634 __func__); 6635 return -EINVAL; 6636 } 6637 6638 if (!virtnet_validate_features(vdev)) 6639 return -EINVAL; 6640 6641 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 6642 int mtu = virtio_cread16(vdev, 6643 offsetof(struct virtio_net_config, 6644 mtu)); 6645 if (mtu < MIN_MTU) 6646 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 6647 } 6648 6649 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) && 6650 !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { 6651 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby"); 6652 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY); 6653 } 6654 6655 return 0; 6656 } 6657 6658 static bool virtnet_check_guest_gso(const struct virtnet_info *vi) 6659 { 6660 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 6661 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 6662 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 6663 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || 6664 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && 6665 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); 6666 } 6667 6668 static void virtnet_set_big_packets(struct virtnet_info *vi, const int 
mtu)
{
	bool guest_gso = virtnet_check_guest_gso(vi);

	/* If the device can receive ANY guest GSO packets, allocate
	 * buffers of the maximum size regardless of mtu; otherwise limit
	 * them to mtu-sized packets only.
	 */
	if (mtu > ETH_DATA_LEN || guest_gso) {
		vi->big_packets = true;
		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
	}
}

#define VIRTIO_NET_HASH_REPORT_MAX_TABLE      10
static enum xdp_rss_hash_type
virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] = {
	[VIRTIO_NET_HASH_REPORT_NONE] = XDP_RSS_TYPE_NONE,
	[VIRTIO_NET_HASH_REPORT_IPv4] = XDP_RSS_TYPE_L3_IPV4,
	[VIRTIO_NET_HASH_REPORT_TCPv4] = XDP_RSS_TYPE_L4_IPV4_TCP,
	[VIRTIO_NET_HASH_REPORT_UDPv4] = XDP_RSS_TYPE_L4_IPV4_UDP,
	[VIRTIO_NET_HASH_REPORT_IPv6] = XDP_RSS_TYPE_L3_IPV6,
	[VIRTIO_NET_HASH_REPORT_TCPv6] = XDP_RSS_TYPE_L4_IPV6_TCP,
	[VIRTIO_NET_HASH_REPORT_UDPv6] = XDP_RSS_TYPE_L4_IPV6_UDP,
	[VIRTIO_NET_HASH_REPORT_IPv6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
	[VIRTIO_NET_HASH_REPORT_TCPv6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
	[VIRTIO_NET_HASH_REPORT_UDPv6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX
};

static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
			       enum xdp_rss_hash_type *rss_type)
{
	const struct xdp_buff *xdp = (void *)_ctx;
	struct virtio_net_hdr_v1_hash *hdr_hash;
	struct virtnet_info *vi;
	u16 hash_report;

	if (!(xdp->rxq->dev->features & NETIF_F_RXHASH))
		return -ENODATA;

	vi = netdev_priv(xdp->rxq->dev);
	hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len);
	hash_report = __le16_to_cpu(hdr_hash->hash_report);

	if (hash_report >= VIRTIO_NET_HASH_REPORT_MAX_TABLE)
		hash_report = VIRTIO_NET_HASH_REPORT_NONE;

	*rss_type = virtnet_xdp_rss_type[hash_report];
	*hash = __le32_to_cpu(hdr_hash->hash_value);
	return 0;
}

static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
	.xmo_rx_hash			= virtnet_xdp_rx_hash,
};

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err = -ENOMEM;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;
	int mtu = 0;

	/* Find out whether the host supports a multiqueue/RSS virtio_net device */
	max_queue_pairs = 1;
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
		max_queue_pairs =
		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));

	/* We need at least 2 queues */
	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
			   IFF_TX_SKB_NO_LINEAR;
	dev->netdev_ops = &virtnet_netdev;
	dev->stat_ops = &virtnet_stat_ops;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features.
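		 * Checksum support is the gateway: the SG and TSO/USO
		 * offloads below can only be enabled alongside it.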
*/ 6762 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; 6763 if (csum) 6764 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 6765 6766 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 6767 dev->hw_features |= NETIF_F_TSO 6768 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 6769 } 6770 /* Individual feature bits: what can host handle? */ 6771 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) 6772 dev->hw_features |= NETIF_F_TSO; 6773 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) 6774 dev->hw_features |= NETIF_F_TSO6; 6775 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 6776 dev->hw_features |= NETIF_F_TSO_ECN; 6777 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO)) 6778 dev->hw_features |= NETIF_F_GSO_UDP_L4; 6779 6780 dev->features |= NETIF_F_GSO_ROBUST; 6781 6782 if (gso) 6783 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; 6784 /* (!csum && gso) case will be fixed by register_netdev() */ 6785 } 6786 6787 /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't 6788 * need to calculate checksums for partially checksummed packets, 6789 * as they're considered valid by the upper layer. 6790 * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only 6791 * receives fully checksummed packets. The device may assist in 6792 * validating these packets' checksums, so the driver won't have to. 6793 */ 6794 dev->features |= NETIF_F_RXCSUM; 6795 6796 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 6797 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) 6798 dev->features |= NETIF_F_GRO_HW; 6799 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) 6800 dev->hw_features |= NETIF_F_GRO_HW; 6801 6802 dev->vlan_features = dev->features; 6803 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 6804 NETDEV_XDP_ACT_XSK_ZEROCOPY; 6805 6806 /* MTU range: 68 - 65535 */ 6807 dev->min_mtu = MIN_MTU; 6808 dev->max_mtu = MAX_MTU; 6809 6810 /* Configuration may specify what MAC to use. Otherwise random. 
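	 * If random, the address is pushed back to the device later in
	 * probe, provided VIRTIO_NET_F_CTRL_MAC_ADDR was negotiated.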
*/ 6811 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { 6812 u8 addr[ETH_ALEN]; 6813 6814 virtio_cread_bytes(vdev, 6815 offsetof(struct virtio_net_config, mac), 6816 addr, ETH_ALEN); 6817 eth_hw_addr_set(dev, addr); 6818 } else { 6819 eth_hw_addr_random(dev); 6820 dev_info(&vdev->dev, "Assigned random MAC address %pM\n", 6821 dev->dev_addr); 6822 } 6823 6824 /* Set up our device-specific information */ 6825 vi = netdev_priv(dev); 6826 vi->dev = dev; 6827 vi->vdev = vdev; 6828 vdev->priv = vi; 6829 6830 INIT_WORK(&vi->config_work, virtnet_config_changed_work); 6831 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); 6832 spin_lock_init(&vi->refill_lock); 6833 6834 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { 6835 vi->mergeable_rx_bufs = true; 6836 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG; 6837 } 6838 6839 if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) 6840 vi->has_rss_hash_report = true; 6841 6842 if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) { 6843 vi->has_rss = true; 6844 6845 vi->rss_indir_table_size = 6846 virtio_cread16(vdev, offsetof(struct virtio_net_config, 6847 rss_max_indirection_table_length)); 6848 } 6849 vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); 6850 if (!vi->rss_hdr) { 6851 err = -ENOMEM; 6852 goto free; 6853 } 6854 6855 if (vi->has_rss || vi->has_rss_hash_report) { 6856 vi->rss_key_size = 6857 virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size)); 6858 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) { 6859 dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n", 6860 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE); 6861 err = -EINVAL; 6862 goto free; 6863 } 6864 6865 vi->rss_hash_types_supported = 6866 virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types)); 6867 vi->rss_hash_types_supported &= 6868 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX | 6869 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | 6870 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX); 6871 6872 dev->hw_features |= NETIF_F_RXHASH; 6873 dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops; 6874 } 6875 6876 if (vi->has_rss_hash_report) 6877 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); 6878 else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || 6879 virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 6880 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 6881 else 6882 vi->hdr_len = sizeof(struct virtio_net_hdr); 6883 6884 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || 6885 virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 6886 vi->any_header_sg = true; 6887 6888 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 6889 vi->has_cvq = true; 6890 6891 mutex_init(&vi->cvq_lock); 6892 6893 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { 6894 mtu = virtio_cread16(vdev, 6895 offsetof(struct virtio_net_config, 6896 mtu)); 6897 if (mtu < dev->min_mtu) { 6898 /* Should never trigger: MTU was previously validated 6899 * in virtnet_validate. 
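			 * virtnet_validate() clears VIRTIO_NET_F_MTU when
			 * the value is out of range, so hitting this means
			 * the device changed its config between validate
			 * and probe.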
			 */
			dev_err(&vdev->dev,
				"device MTU appears to have changed, it is now %d < %d",
				mtu, dev->min_mtu);
			err = -EINVAL;
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;
	}

	virtnet_set_big_packets(vi, mtu);

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
		vi->intr_coal_rx.max_usecs = 0;
		vi->intr_coal_tx.max_usecs = 0;
		vi->intr_coal_rx.max_packets = 0;

		/* Keep the default values of the coalescing parameters
		 * aligned with the default napi_tx state.
		 */
		if (vi->sq[0].napi.weight)
			vi->intr_coal_tx.max_packets = 1;
		else
			vi->intr_coal_tx.max_packets = 0;
	}

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
		/* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
		for (i = 0; i < vi->max_queue_pairs; i++)
			if (vi->sq[i].napi.weight)
				vi->sq[i].intr_coal.max_packets = 1;

		err = virtnet_init_irq_moder(vi);
		if (err)
			goto free;
	}

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover)) {
			err = PTR_ERR(vi->failover);
			goto free_vqs;
		}
	}

	if (vi->has_rss || vi->has_rss_hash_report)
		virtnet_init_default_rss(vi);

	enable_rx_mode_work(vi);

	/* serialize netdev register + virtio_device_ready() with ndo_open() */
	rtnl_lock();

	err = register_netdevice(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		rtnl_unlock();
		goto free_failover;
	}

	/* Disable config change notification until ndo_open. */
	virtio_config_driver_disable(vi->vdev);

	virtio_device_ready(vdev);

	if (vi->has_rss || vi->has_rss_hash_report) {
		if (!virtnet_commit_rss_command(vi)) {
			dev_warn(&vdev->dev, "RSS disabled because committing failed.\n");
			dev->hw_features &= ~NETIF_F_RXHASH;
			vi->has_rss_hash_report = false;
			vi->has_rss = false;
		}
	}

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* A random MAC address has been assigned, notify the device.
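	 * The device has no other way to learn the address we generated.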
	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent,
	 * because many devices work fine without the MAC being set
	 * explicitly.
	 */
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct scatterlist sg;

		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			pr_debug("virtio_net: setting MAC address failed\n");
			rtnl_unlock();
			err = -EINVAL;
			goto free_unregister_netdev;
		}
	}

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
		struct virtio_net_stats_capabilities *stats_cap __free(kfree) = NULL;
		struct scatterlist sg;
		__le64 v;

		stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL);
		if (!stats_cap) {
			rtnl_unlock();
			err = -ENOMEM;
			goto free_unregister_netdev;
		}

		sg_init_one(&sg, stats_cap, sizeof(*stats_cap));

		if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
						VIRTIO_NET_CTRL_STATS_QUERY,
						NULL, &sg)) {
			pr_debug("virtio_net: failed to get stats capability\n");
			rtnl_unlock();
			err = -EINVAL;
			goto free_unregister_netdev;
		}

		v = stats_cap->supported_stats_types[0];
		vi->device_stats_cap = le64_to_cpu(v);
	}

	/* Assume link up if the device can't report link status;
	 * otherwise get link status from config.
	 */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		virtnet_config_changed_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	rtnl_unlock();

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	virtio_reset_device(vdev);
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	int i;

	virtio_reset_device(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	/* Rule of thumb: netdev_tx_reset_queue() should follow any skb
	 * freeing that is not followed by netdev_tx_completed_queue().
	 */
	for (i = 0; i < vi->max_queue_pairs; i++)
		netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device.
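	 * The config-change and rx-mode work items may still be queued or
	 * running; flush them before the netdev is unregistered.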
*/ 7120 flush_work(&vi->config_work); 7121 disable_rx_mode_work(vi); 7122 flush_work(&vi->rx_mode_work); 7123 7124 virtnet_free_irq_moder(vi); 7125 7126 unregister_netdev(vi->dev); 7127 7128 net_failover_destroy(vi->failover); 7129 7130 remove_vq_common(vi); 7131 7132 free_netdev(vi->dev); 7133 } 7134 7135 static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) 7136 { 7137 struct virtnet_info *vi = vdev->priv; 7138 7139 virtnet_cpu_notif_remove(vi); 7140 virtnet_freeze_down(vdev); 7141 remove_vq_common(vi); 7142 7143 return 0; 7144 } 7145 7146 static __maybe_unused int virtnet_restore(struct virtio_device *vdev) 7147 { 7148 struct virtnet_info *vi = vdev->priv; 7149 int err; 7150 7151 err = virtnet_restore_up(vdev); 7152 if (err) 7153 return err; 7154 virtnet_set_queues(vi, vi->curr_queue_pairs); 7155 7156 err = virtnet_cpu_notif_add(vi); 7157 if (err) { 7158 virtnet_freeze_down(vdev); 7159 remove_vq_common(vi); 7160 return err; 7161 } 7162 7163 return 0; 7164 } 7165 7166 static struct virtio_device_id id_table[] = { 7167 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, 7168 { 0 }, 7169 }; 7170 7171 #define VIRTNET_FEATURES \ 7172 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ 7173 VIRTIO_NET_F_MAC, \ 7174 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ 7175 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ 7176 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ 7177 VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \ 7178 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ 7179 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ 7180 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ 7181 VIRTIO_NET_F_CTRL_MAC_ADDR, \ 7182 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ 7183 VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \ 7184 VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \ 7185 VIRTIO_NET_F_VQ_NOTF_COAL, \ 7186 VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS 7187 7188 static unsigned int features[] = { 7189 VIRTNET_FEATURES, 7190 }; 7191 7192 static unsigned int features_legacy[] = { 7193 VIRTNET_FEATURES, 7194 VIRTIO_NET_F_GSO, 7195 VIRTIO_F_ANY_LAYOUT, 7196 }; 7197 7198 static struct virtio_driver virtio_net_driver = { 7199 .feature_table = features, 7200 .feature_table_size = ARRAY_SIZE(features), 7201 .feature_table_legacy = features_legacy, 7202 .feature_table_size_legacy = ARRAY_SIZE(features_legacy), 7203 .driver.name = KBUILD_MODNAME, 7204 .id_table = id_table, 7205 .validate = virtnet_validate, 7206 .probe = virtnet_probe, 7207 .remove = virtnet_remove, 7208 .config_changed = virtnet_config_changed, 7209 #ifdef CONFIG_PM_SLEEP 7210 .freeze = virtnet_freeze, 7211 .restore = virtnet_restore, 7212 #endif 7213 }; 7214 7215 static __init int virtio_net_driver_init(void) 7216 { 7217 int ret; 7218 7219 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", 7220 virtnet_cpu_online, 7221 virtnet_cpu_down_prep); 7222 if (ret < 0) 7223 goto out; 7224 virtionet_online = ret; 7225 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", 7226 NULL, virtnet_cpu_dead); 7227 if (ret) 7228 goto err_dead; 7229 ret = register_virtio_driver(&virtio_net_driver); 7230 if (ret) 7231 goto err_virtio; 7232 return 0; 7233 err_virtio: 7234 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 7235 err_dead: 7236 cpuhp_remove_multi_state(virtionet_online); 7237 out: 7238 return ret; 7239 } 7240 module_init(virtio_net_driver_init); 7241 7242 static __exit void 
virtio_net_driver_exit(void) 7243 { 7244 unregister_virtio_driver(&virtio_net_driver); 7245 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 7246 cpuhp_remove_multi_state(virtionet_online); 7247 } 7248 module_exit(virtio_net_driver_exit); 7249 7250 MODULE_DEVICE_TABLE(virtio, id_table); 7251 MODULE_DESCRIPTION("Virtio network driver"); 7252 MODULE_LICENSE("GPL"); 7253