1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2009, Microsoft Corporation. 4 * 5 * Authors: 6 * Haiyang Zhang <haiyangz@microsoft.com> 7 * Hank Janssen <hjanssen@microsoft.com> 8 */ 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/init.h> 12 #include <linux/atomic.h> 13 #include <linux/ethtool.h> 14 #include <linux/module.h> 15 #include <linux/highmem.h> 16 #include <linux/device.h> 17 #include <linux/io.h> 18 #include <linux/delay.h> 19 #include <linux/netdevice.h> 20 #include <linux/inetdevice.h> 21 #include <linux/etherdevice.h> 22 #include <linux/pci.h> 23 #include <linux/skbuff.h> 24 #include <linux/if_vlan.h> 25 #include <linux/in.h> 26 #include <linux/slab.h> 27 #include <linux/rtnetlink.h> 28 #include <linux/netpoll.h> 29 #include <linux/bpf.h> 30 31 #include <net/arp.h> 32 #include <net/route.h> 33 #include <net/sock.h> 34 #include <net/pkt_sched.h> 35 #include <net/checksum.h> 36 #include <net/ip6_checksum.h> 37 38 #include "hyperv_net.h" 39 40 #define RING_SIZE_MIN 64 41 42 #define LINKCHANGE_INT (2 * HZ) 43 #define VF_TAKEOVER_INT (HZ / 10) 44 45 /* Macros to define the context of vf registration */ 46 #define VF_REG_IN_PROBE 1 47 #define VF_REG_IN_NOTIFIER 2 48 49 static unsigned int ring_size __ro_after_init = 128; 50 module_param(ring_size, uint, 0444); 51 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)"); 52 unsigned int netvsc_ring_bytes __ro_after_init; 53 54 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | 55 NETIF_MSG_LINK | NETIF_MSG_IFUP | 56 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | 57 NETIF_MSG_TX_ERR; 58 59 static int debug = -1; 60 module_param(debug, int, 0444); 61 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 62 63 static LIST_HEAD(netvsc_dev_list); 64 65 static void netvsc_change_rx_flags(struct net_device *net, int change) 66 { 67 struct net_device_context *ndev_ctx = netdev_priv(net); 68 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 69 int inc; 70 71 if (!vf_netdev) 72 return; 73 74 if (change & IFF_PROMISC) { 75 inc = (net->flags & IFF_PROMISC) ? 1 : -1; 76 dev_set_promiscuity(vf_netdev, inc); 77 } 78 79 if (change & IFF_ALLMULTI) { 80 inc = (net->flags & IFF_ALLMULTI) ? 1 : -1; 81 dev_set_allmulti(vf_netdev, inc); 82 } 83 } 84 85 static void netvsc_set_rx_mode(struct net_device *net) 86 { 87 struct net_device_context *ndev_ctx = netdev_priv(net); 88 struct net_device *vf_netdev; 89 struct netvsc_device *nvdev; 90 91 rcu_read_lock(); 92 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev); 93 if (vf_netdev) { 94 dev_uc_sync(vf_netdev, net); 95 dev_mc_sync(vf_netdev, net); 96 } 97 98 nvdev = rcu_dereference(ndev_ctx->nvdev); 99 if (nvdev) 100 rndis_filter_update(nvdev); 101 rcu_read_unlock(); 102 } 103 104 static void netvsc_tx_enable(struct netvsc_device *nvscdev, 105 struct net_device *ndev) 106 { 107 nvscdev->tx_disable = false; 108 virt_wmb(); /* ensure queue wake up mechanism is on */ 109 110 netif_tx_wake_all_queues(ndev); 111 } 112 113 static int netvsc_open(struct net_device *net) 114 { 115 struct net_device_context *ndev_ctx = netdev_priv(net); 116 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 117 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); 118 struct rndis_device *rdev; 119 int ret = 0; 120 121 netif_carrier_off(net); 122 123 /* Open up the device */ 124 ret = rndis_filter_open(nvdev); 125 if (ret != 0) { 126 netdev_err(net, "unable to open device (ret %d).\n", ret); 127 return ret; 128 } 129 130 rdev = nvdev->extension; 131 if (!rdev->link_state) { 132 netif_carrier_on(net); 133 netvsc_tx_enable(nvdev, net); 134 } 135 136 if (vf_netdev) { 137 /* Setting synthetic device up transparently sets 138 * slave as up. If open fails, then slave will be 139 * still be offline (and not used). 140 */ 141 ret = dev_open(vf_netdev, NULL); 142 if (ret) 143 netdev_warn(net, 144 "unable to open slave: %s: %d\n", 145 vf_netdev->name, ret); 146 } 147 return 0; 148 } 149 150 static int netvsc_wait_until_empty(struct netvsc_device *nvdev) 151 { 152 unsigned int retry = 0; 153 int i; 154 155 /* Ensure pending bytes in ring are read */ 156 for (;;) { 157 u32 aread = 0; 158 159 for (i = 0; i < nvdev->num_chn; i++) { 160 struct vmbus_channel *chn 161 = nvdev->chan_table[i].channel; 162 163 if (!chn) 164 continue; 165 166 /* make sure receive not running now */ 167 napi_synchronize(&nvdev->chan_table[i].napi); 168 169 aread = hv_get_bytes_to_read(&chn->inbound); 170 if (aread) 171 break; 172 173 aread = hv_get_bytes_to_read(&chn->outbound); 174 if (aread) 175 break; 176 } 177 178 if (aread == 0) 179 return 0; 180 181 if (++retry > RETRY_MAX) 182 return -ETIMEDOUT; 183 184 usleep_range(RETRY_US_LO, RETRY_US_HI); 185 } 186 } 187 188 static void netvsc_tx_disable(struct netvsc_device *nvscdev, 189 struct net_device *ndev) 190 { 191 if (nvscdev) { 192 nvscdev->tx_disable = true; 193 virt_wmb(); /* ensure txq will not wake up after stop */ 194 } 195 196 netif_tx_disable(ndev); 197 } 198 199 static int netvsc_close(struct net_device *net) 200 { 201 struct net_device_context *net_device_ctx = netdev_priv(net); 202 struct net_device *vf_netdev 203 = rtnl_dereference(net_device_ctx->vf_netdev); 204 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 205 int ret; 206 207 netvsc_tx_disable(nvdev, net); 208 209 /* No need to close rndis filter if it is removed already */ 210 if (!nvdev) 211 return 0; 212 213 ret = rndis_filter_close(nvdev); 214 if (ret != 0) { 215 netdev_err(net, "unable to close device (ret %d).\n", ret); 216 return ret; 217 } 218 219 ret = netvsc_wait_until_empty(nvdev); 220 if (ret) 221 netdev_err(net, "Ring buffer not empty after closing rndis\n"); 222 223 if (vf_netdev) 224 dev_close(vf_netdev); 225 226 return ret; 227 } 228 229 static inline void *init_ppi_data(struct rndis_message *msg, 230 u32 ppi_size, u32 pkt_type) 231 { 232 struct rndis_packet *rndis_pkt = &msg->msg.pkt; 233 struct rndis_per_packet_info *ppi; 234 235 rndis_pkt->data_offset += ppi_size; 236 ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset 237 + rndis_pkt->per_pkt_info_len; 238 239 ppi->size = ppi_size; 240 ppi->type = pkt_type; 241 ppi->internal = 0; 242 ppi->ppi_offset = sizeof(struct rndis_per_packet_info); 243 244 rndis_pkt->per_pkt_info_len += ppi_size; 245 246 return ppi + 1; 247 } 248 249 static inline int netvsc_get_tx_queue(struct net_device *ndev, 250 struct sk_buff *skb, int old_idx) 251 { 252 const struct net_device_context *ndc = netdev_priv(ndev); 253 struct sock *sk = skb->sk; 254 int q_idx; 255 256 q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) & 257 (VRSS_SEND_TAB_SIZE - 1)]; 258 259 /* If queue index changed record the new value */ 260 if (q_idx != old_idx && 261 sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) 262 sk_tx_queue_set(sk, q_idx); 263 264 return q_idx; 265 } 266 267 /* 268 * Select queue for transmit. 269 * 270 * If a valid queue has already been assigned, then use that. 271 * Otherwise compute tx queue based on hash and the send table. 272 * 273 * This is basically similar to default (netdev_pick_tx) with the added step 274 * of using the host send_table when no other queue has been assigned. 275 * 276 * TODO support XPS - but get_xps_queue not exported 277 */ 278 static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) 279 { 280 int q_idx = sk_tx_queue_get(skb->sk); 281 282 if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { 283 /* If forwarding a packet, we use the recorded queue when 284 * available for better cache locality. 285 */ 286 if (skb_rx_queue_recorded(skb)) 287 q_idx = skb_get_rx_queue(skb); 288 else 289 q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); 290 } 291 292 return q_idx; 293 } 294 295 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, 296 struct net_device *sb_dev) 297 { 298 struct net_device_context *ndc = netdev_priv(ndev); 299 struct net_device *vf_netdev; 300 u16 txq; 301 302 rcu_read_lock(); 303 vf_netdev = rcu_dereference(ndc->vf_netdev); 304 if (vf_netdev) { 305 const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; 306 307 if (vf_ops->ndo_select_queue) 308 txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev); 309 else 310 txq = netdev_pick_tx(vf_netdev, skb, NULL); 311 312 /* Record the queue selected by VF so that it can be 313 * used for common case where VF has more queues than 314 * the synthetic device. 315 */ 316 qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq; 317 } else { 318 txq = netvsc_pick_tx(ndev, skb); 319 } 320 rcu_read_unlock(); 321 322 while (txq >= ndev->real_num_tx_queues) 323 txq -= ndev->real_num_tx_queues; 324 325 return txq; 326 } 327 328 static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len, 329 struct hv_page_buffer *pb) 330 { 331 int j = 0; 332 333 hvpfn += offset >> HV_HYP_PAGE_SHIFT; 334 offset = offset & ~HV_HYP_PAGE_MASK; 335 336 while (len > 0) { 337 unsigned long bytes; 338 339 bytes = HV_HYP_PAGE_SIZE - offset; 340 if (bytes > len) 341 bytes = len; 342 pb[j].pfn = hvpfn; 343 pb[j].offset = offset; 344 pb[j].len = bytes; 345 346 offset += bytes; 347 len -= bytes; 348 349 if (offset == HV_HYP_PAGE_SIZE && len) { 350 hvpfn++; 351 offset = 0; 352 j++; 353 } 354 } 355 356 return j + 1; 357 } 358 359 static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, 360 struct hv_netvsc_packet *packet, 361 struct hv_page_buffer *pb) 362 { 363 u32 slots_used = 0; 364 char *data = skb->data; 365 int frags = skb_shinfo(skb)->nr_frags; 366 int i; 367 368 /* The packet is laid out thus: 369 * 1. hdr: RNDIS header and PPI 370 * 2. skb linear data 371 * 3. skb fragment data 372 */ 373 slots_used += fill_pg_buf(virt_to_hvpfn(hdr), 374 offset_in_hvpage(hdr), 375 len, 376 &pb[slots_used]); 377 378 packet->rmsg_size = len; 379 packet->rmsg_pgcnt = slots_used; 380 381 slots_used += fill_pg_buf(virt_to_hvpfn(data), 382 offset_in_hvpage(data), 383 skb_headlen(skb), 384 &pb[slots_used]); 385 386 for (i = 0; i < frags; i++) { 387 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 388 389 slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)), 390 skb_frag_off(frag), 391 skb_frag_size(frag), 392 &pb[slots_used]); 393 } 394 return slots_used; 395 } 396 397 static int count_skb_frag_slots(struct sk_buff *skb) 398 { 399 int i, frags = skb_shinfo(skb)->nr_frags; 400 int pages = 0; 401 402 for (i = 0; i < frags; i++) { 403 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 404 unsigned long size = skb_frag_size(frag); 405 unsigned long offset = skb_frag_off(frag); 406 407 /* Skip unused frames from start of page */ 408 offset &= ~HV_HYP_PAGE_MASK; 409 pages += HVPFN_UP(offset + size); 410 } 411 return pages; 412 } 413 414 static int netvsc_get_slots(struct sk_buff *skb) 415 { 416 char *data = skb->data; 417 unsigned int offset = offset_in_hvpage(data); 418 unsigned int len = skb_headlen(skb); 419 int slots; 420 int frag_slots; 421 422 slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE); 423 frag_slots = count_skb_frag_slots(skb); 424 return slots + frag_slots; 425 } 426 427 static u32 net_checksum_info(struct sk_buff *skb) 428 { 429 if (skb->protocol == htons(ETH_P_IP)) { 430 struct iphdr *ip = ip_hdr(skb); 431 432 if (ip->protocol == IPPROTO_TCP) 433 return TRANSPORT_INFO_IPV4_TCP; 434 else if (ip->protocol == IPPROTO_UDP) 435 return TRANSPORT_INFO_IPV4_UDP; 436 } else { 437 struct ipv6hdr *ip6 = ipv6_hdr(skb); 438 439 if (ip6->nexthdr == IPPROTO_TCP) 440 return TRANSPORT_INFO_IPV6_TCP; 441 else if (ip6->nexthdr == IPPROTO_UDP) 442 return TRANSPORT_INFO_IPV6_UDP; 443 } 444 445 return TRANSPORT_INFO_NOT_IP; 446 } 447 448 /* Send skb on the slave VF device. */ 449 static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, 450 struct sk_buff *skb) 451 { 452 struct net_device_context *ndev_ctx = netdev_priv(net); 453 unsigned int len = skb->len; 454 int rc; 455 456 skb->dev = vf_netdev; 457 skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); 458 459 rc = dev_queue_xmit(skb); 460 if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { 461 struct netvsc_vf_pcpu_stats *pcpu_stats 462 = this_cpu_ptr(ndev_ctx->vf_stats); 463 464 u64_stats_update_begin(&pcpu_stats->syncp); 465 pcpu_stats->tx_packets++; 466 pcpu_stats->tx_bytes += len; 467 u64_stats_update_end(&pcpu_stats->syncp); 468 } else { 469 this_cpu_inc(ndev_ctx->vf_stats->tx_dropped); 470 } 471 472 return rc; 473 } 474 475 static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx) 476 { 477 struct net_device_context *net_device_ctx = netdev_priv(net); 478 struct hv_netvsc_packet *packet = NULL; 479 int ret; 480 unsigned int num_data_pgs; 481 struct rndis_message *rndis_msg; 482 struct net_device *vf_netdev; 483 u32 rndis_msg_size; 484 u32 hash; 485 struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; 486 487 /* If VF is present and up then redirect packets to it. 488 * Skip the VF if it is marked down or has no carrier. 489 * If netpoll is in uses, then VF can not be used either. 490 */ 491 vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); 492 if (vf_netdev && netif_running(vf_netdev) && 493 netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) && 494 net_device_ctx->data_path_is_vf) 495 return netvsc_vf_xmit(net, vf_netdev, skb); 496 497 /* We will atmost need two pages to describe the rndis 498 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number 499 * of pages in a single packet. If skb is scattered around 500 * more pages we try linearizing it. 501 */ 502 503 num_data_pgs = netvsc_get_slots(skb) + 2; 504 505 if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { 506 ++net_device_ctx->eth_stats.tx_scattered; 507 508 if (skb_linearize(skb)) 509 goto no_memory; 510 511 num_data_pgs = netvsc_get_slots(skb) + 2; 512 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { 513 ++net_device_ctx->eth_stats.tx_too_big; 514 goto drop; 515 } 516 } 517 518 /* 519 * Place the rndis header in the skb head room and 520 * the skb->cb will be used for hv_netvsc_packet 521 * structure. 522 */ 523 ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE); 524 if (ret) 525 goto no_memory; 526 527 /* Use the skb control buffer for building up the packet */ 528 BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) > 529 sizeof_field(struct sk_buff, cb)); 530 packet = (struct hv_netvsc_packet *)skb->cb; 531 532 packet->q_idx = skb_get_queue_mapping(skb); 533 534 packet->total_data_buflen = skb->len; 535 packet->total_bytes = skb->len; 536 packet->total_packets = 1; 537 538 rndis_msg = (struct rndis_message *)skb->head; 539 540 /* Add the rndis header */ 541 rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET; 542 rndis_msg->msg_len = packet->total_data_buflen; 543 544 rndis_msg->msg.pkt = (struct rndis_packet) { 545 .data_offset = sizeof(struct rndis_packet), 546 .data_len = packet->total_data_buflen, 547 .per_pkt_info_offset = sizeof(struct rndis_packet), 548 }; 549 550 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet); 551 552 hash = skb_get_hash_raw(skb); 553 if (hash != 0 && net->real_num_tx_queues > 1) { 554 u32 *hash_info; 555 556 rndis_msg_size += NDIS_HASH_PPI_SIZE; 557 hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE, 558 NBL_HASH_VALUE); 559 *hash_info = hash; 560 } 561 562 /* When using AF_PACKET we need to drop VLAN header from 563 * the frame and update the SKB to allow the HOST OS 564 * to transmit the 802.1Q packet 565 */ 566 if (skb->protocol == htons(ETH_P_8021Q)) { 567 u16 vlan_tci; 568 569 skb_reset_mac_header(skb); 570 if (eth_type_vlan(eth_hdr(skb)->h_proto)) { 571 if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) { 572 ++net_device_ctx->eth_stats.vlan_error; 573 goto drop; 574 } 575 576 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); 577 /* Update the NDIS header pkt lengths */ 578 packet->total_data_buflen -= VLAN_HLEN; 579 packet->total_bytes -= VLAN_HLEN; 580 rndis_msg->msg_len = packet->total_data_buflen; 581 rndis_msg->msg.pkt.data_len = packet->total_data_buflen; 582 } 583 } 584 585 if (skb_vlan_tag_present(skb)) { 586 struct ndis_pkt_8021q_info *vlan; 587 588 rndis_msg_size += NDIS_VLAN_PPI_SIZE; 589 vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE, 590 IEEE_8021Q_INFO); 591 592 vlan->value = 0; 593 vlan->vlanid = skb_vlan_tag_get_id(skb); 594 vlan->cfi = skb_vlan_tag_get_cfi(skb); 595 vlan->pri = skb_vlan_tag_get_prio(skb); 596 } 597 598 if (skb_is_gso(skb)) { 599 struct ndis_tcp_lso_info *lso_info; 600 601 rndis_msg_size += NDIS_LSO_PPI_SIZE; 602 lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, 603 TCP_LARGESEND_PKTINFO); 604 605 lso_info->value = 0; 606 lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; 607 if (skb->protocol == htons(ETH_P_IP)) { 608 lso_info->lso_v2_transmit.ip_version = 609 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; 610 ip_hdr(skb)->tot_len = 0; 611 ip_hdr(skb)->check = 0; 612 tcp_hdr(skb)->check = 613 ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 614 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); 615 } else { 616 lso_info->lso_v2_transmit.ip_version = 617 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; 618 tcp_v6_gso_csum_prep(skb); 619 } 620 lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb); 621 lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; 622 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 623 if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) { 624 struct ndis_tcp_ip_checksum_info *csum_info; 625 626 rndis_msg_size += NDIS_CSUM_PPI_SIZE; 627 csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, 628 TCPIP_CHKSUM_PKTINFO); 629 630 csum_info->value = 0; 631 csum_info->transmit.tcp_header_offset = skb_transport_offset(skb); 632 633 if (skb->protocol == htons(ETH_P_IP)) { 634 csum_info->transmit.is_ipv4 = 1; 635 636 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 637 csum_info->transmit.tcp_checksum = 1; 638 else 639 csum_info->transmit.udp_checksum = 1; 640 } else { 641 csum_info->transmit.is_ipv6 = 1; 642 643 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 644 csum_info->transmit.tcp_checksum = 1; 645 else 646 csum_info->transmit.udp_checksum = 1; 647 } 648 } else { 649 /* Can't do offload of this type of checksum */ 650 if (skb_checksum_help(skb)) 651 goto drop; 652 } 653 } 654 655 /* Start filling in the page buffers with the rndis hdr */ 656 rndis_msg->msg_len += rndis_msg_size; 657 packet->total_data_buflen = rndis_msg->msg_len; 658 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, 659 skb, packet, pb); 660 661 /* timestamp packet in software */ 662 skb_tx_timestamp(skb); 663 664 ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx); 665 if (likely(ret == 0)) 666 return NETDEV_TX_OK; 667 668 if (ret == -EAGAIN) { 669 ++net_device_ctx->eth_stats.tx_busy; 670 return NETDEV_TX_BUSY; 671 } 672 673 if (ret == -ENOSPC) 674 ++net_device_ctx->eth_stats.tx_no_space; 675 676 drop: 677 dev_kfree_skb_any(skb); 678 net->stats.tx_dropped++; 679 680 return NETDEV_TX_OK; 681 682 no_memory: 683 ++net_device_ctx->eth_stats.tx_no_memory; 684 goto drop; 685 } 686 687 static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb, 688 struct net_device *ndev) 689 { 690 return netvsc_xmit(skb, ndev, false); 691 } 692 693 /* 694 * netvsc_linkstatus_callback - Link up/down notification 695 */ 696 void netvsc_linkstatus_callback(struct net_device *net, 697 struct rndis_message *resp, 698 void *data, u32 data_buflen) 699 { 700 struct rndis_indicate_status *indicate = &resp->msg.indicate_status; 701 struct net_device_context *ndev_ctx = netdev_priv(net); 702 struct netvsc_reconfig *event; 703 unsigned long flags; 704 705 /* Ensure the packet is big enough to access its fields */ 706 if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) { 707 netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n", 708 resp->msg_len); 709 return; 710 } 711 712 /* Copy the RNDIS indicate status into nvchan->recv_buf */ 713 memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate)); 714 715 /* Update the physical link speed when changing to another vSwitch */ 716 if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { 717 u32 speed; 718 719 /* Validate status_buf_offset and status_buflen. 720 * 721 * Certain (pre-Fe) implementations of Hyper-V's vSwitch didn't account 722 * for the status buffer field in resp->msg_len; perform the validation 723 * using data_buflen (>= resp->msg_len). 724 */ 725 if (indicate->status_buflen < sizeof(speed) || 726 indicate->status_buf_offset < sizeof(*indicate) || 727 data_buflen - RNDIS_HEADER_SIZE < indicate->status_buf_offset || 728 data_buflen - RNDIS_HEADER_SIZE - indicate->status_buf_offset 729 < indicate->status_buflen) { 730 netdev_err(net, "invalid rndis_indicate_status packet\n"); 731 return; 732 } 733 734 speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000; 735 ndev_ctx->speed = speed; 736 return; 737 } 738 739 /* Handle these link change statuses below */ 740 if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE && 741 indicate->status != RNDIS_STATUS_MEDIA_CONNECT && 742 indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT) 743 return; 744 745 if (net->reg_state != NETREG_REGISTERED) 746 return; 747 748 event = kzalloc(sizeof(*event), GFP_ATOMIC); 749 if (!event) 750 return; 751 event->event = indicate->status; 752 753 spin_lock_irqsave(&ndev_ctx->lock, flags); 754 list_add_tail(&event->list, &ndev_ctx->reconfig_events); 755 spin_unlock_irqrestore(&ndev_ctx->lock, flags); 756 757 schedule_delayed_work(&ndev_ctx->dwork, 0); 758 } 759 760 /* This function should only be called after skb_record_rx_queue() */ 761 void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev) 762 { 763 int rc; 764 765 skb->queue_mapping = skb_get_rx_queue(skb); 766 __skb_push(skb, ETH_HLEN); 767 768 rc = netvsc_xmit(skb, ndev, true); 769 770 if (dev_xmit_complete(rc)) 771 return; 772 773 dev_kfree_skb_any(skb); 774 ndev->stats.tx_dropped++; 775 } 776 777 static void netvsc_comp_ipcsum(struct sk_buff *skb) 778 { 779 struct iphdr *iph = (struct iphdr *)skb->data; 780 781 iph->check = 0; 782 iph->check = ip_fast_csum(iph, iph->ihl); 783 } 784 785 static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, 786 struct netvsc_channel *nvchan, 787 struct xdp_buff *xdp) 788 { 789 struct napi_struct *napi = &nvchan->napi; 790 const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan; 791 const struct ndis_tcp_ip_checksum_info *csum_info = 792 &nvchan->rsc.csum_info; 793 const u32 *hash_info = &nvchan->rsc.hash_info; 794 u8 ppi_flags = nvchan->rsc.ppi_flags; 795 struct sk_buff *skb; 796 void *xbuf = xdp->data_hard_start; 797 int i; 798 799 if (xbuf) { 800 unsigned int hdroom = xdp->data - xdp->data_hard_start; 801 unsigned int xlen = xdp->data_end - xdp->data; 802 unsigned int frag_size = xdp->frame_sz; 803 804 skb = build_skb(xbuf, frag_size); 805 806 if (!skb) { 807 __free_page(virt_to_page(xbuf)); 808 return NULL; 809 } 810 811 skb_reserve(skb, hdroom); 812 skb_put(skb, xlen); 813 skb->dev = napi->dev; 814 } else { 815 skb = napi_alloc_skb(napi, nvchan->rsc.pktlen); 816 817 if (!skb) 818 return NULL; 819 820 /* Copy to skb. This copy is needed here since the memory 821 * pointed by hv_netvsc_packet cannot be deallocated. 822 */ 823 for (i = 0; i < nvchan->rsc.cnt; i++) 824 skb_put_data(skb, nvchan->rsc.data[i], 825 nvchan->rsc.len[i]); 826 } 827 828 skb->protocol = eth_type_trans(skb, net); 829 830 /* skb is already created with CHECKSUM_NONE */ 831 skb_checksum_none_assert(skb); 832 833 /* Incoming packets may have IP header checksum verified by the host. 834 * They may not have IP header checksum computed after coalescing. 835 * We compute it here if the flags are set, because on Linux, the IP 836 * checksum is always checked. 837 */ 838 if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid && 839 csum_info->receive.ip_checksum_succeeded && 840 skb->protocol == htons(ETH_P_IP)) { 841 /* Check that there is enough space to hold the IP header. */ 842 if (skb_headlen(skb) < sizeof(struct iphdr)) { 843 kfree_skb(skb); 844 return NULL; 845 } 846 netvsc_comp_ipcsum(skb); 847 } 848 849 /* Do L4 checksum offload if enabled and present. */ 850 if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) { 851 if (csum_info->receive.tcp_checksum_succeeded || 852 csum_info->receive.udp_checksum_succeeded) 853 skb->ip_summed = CHECKSUM_UNNECESSARY; 854 } 855 856 if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH)) 857 skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4); 858 859 if (ppi_flags & NVSC_RSC_VLAN) { 860 u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) | 861 (vlan->cfi ? VLAN_CFI_MASK : 0); 862 863 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 864 vlan_tci); 865 } 866 867 return skb; 868 } 869 870 /* 871 * netvsc_recv_callback - Callback when we receive a packet from the 872 * "wire" on the specified device. 873 */ 874 int netvsc_recv_callback(struct net_device *net, 875 struct netvsc_device *net_device, 876 struct netvsc_channel *nvchan) 877 { 878 struct net_device_context *net_device_ctx = netdev_priv(net); 879 struct vmbus_channel *channel = nvchan->channel; 880 u16 q_idx = channel->offermsg.offer.sub_channel_index; 881 struct sk_buff *skb; 882 struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats; 883 struct xdp_buff xdp; 884 u32 act; 885 886 if (net->reg_state != NETREG_REGISTERED) 887 return NVSP_STAT_FAIL; 888 889 act = netvsc_run_xdp(net, nvchan, &xdp); 890 891 if (act == XDP_REDIRECT) 892 return NVSP_STAT_SUCCESS; 893 894 if (act != XDP_PASS && act != XDP_TX) { 895 u64_stats_update_begin(&rx_stats->syncp); 896 rx_stats->xdp_drop++; 897 u64_stats_update_end(&rx_stats->syncp); 898 899 return NVSP_STAT_SUCCESS; /* consumed by XDP */ 900 } 901 902 /* Allocate a skb - TODO direct I/O to pages? */ 903 skb = netvsc_alloc_recv_skb(net, nvchan, &xdp); 904 905 if (unlikely(!skb)) { 906 ++net_device_ctx->eth_stats.rx_no_memory; 907 return NVSP_STAT_FAIL; 908 } 909 910 skb_record_rx_queue(skb, q_idx); 911 912 /* 913 * Even if injecting the packet, record the statistics 914 * on the synthetic device because modifying the VF device 915 * statistics will not work correctly. 916 */ 917 u64_stats_update_begin(&rx_stats->syncp); 918 if (act == XDP_TX) 919 rx_stats->xdp_tx++; 920 921 rx_stats->packets++; 922 rx_stats->bytes += nvchan->rsc.pktlen; 923 924 if (skb->pkt_type == PACKET_BROADCAST) 925 ++rx_stats->broadcast; 926 else if (skb->pkt_type == PACKET_MULTICAST) 927 ++rx_stats->multicast; 928 u64_stats_update_end(&rx_stats->syncp); 929 930 if (act == XDP_TX) { 931 netvsc_xdp_xmit(skb, net); 932 return NVSP_STAT_SUCCESS; 933 } 934 935 napi_gro_receive(&nvchan->napi, skb); 936 return NVSP_STAT_SUCCESS; 937 } 938 939 static void netvsc_get_drvinfo(struct net_device *net, 940 struct ethtool_drvinfo *info) 941 { 942 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 943 strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); 944 } 945 946 static void netvsc_get_channels(struct net_device *net, 947 struct ethtool_channels *channel) 948 { 949 struct net_device_context *net_device_ctx = netdev_priv(net); 950 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 951 952 if (nvdev) { 953 channel->max_combined = nvdev->max_chn; 954 channel->combined_count = nvdev->num_chn; 955 } 956 } 957 958 /* Alloc struct netvsc_device_info, and initialize it from either existing 959 * struct netvsc_device, or from default values. 960 */ 961 static 962 struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev) 963 { 964 struct netvsc_device_info *dev_info; 965 struct bpf_prog *prog; 966 967 dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC); 968 969 if (!dev_info) 970 return NULL; 971 972 if (nvdev) { 973 ASSERT_RTNL(); 974 975 dev_info->num_chn = nvdev->num_chn; 976 dev_info->send_sections = nvdev->send_section_cnt; 977 dev_info->send_section_size = nvdev->send_section_size; 978 dev_info->recv_sections = nvdev->recv_section_cnt; 979 dev_info->recv_section_size = nvdev->recv_section_size; 980 981 memcpy(dev_info->rss_key, nvdev->extension->rss_key, 982 NETVSC_HASH_KEYLEN); 983 984 prog = netvsc_xdp_get(nvdev); 985 if (prog) { 986 bpf_prog_inc(prog); 987 dev_info->bprog = prog; 988 } 989 } else { 990 dev_info->num_chn = max(VRSS_CHANNEL_DEFAULT, 991 netif_get_num_default_rss_queues()); 992 dev_info->send_sections = NETVSC_DEFAULT_TX; 993 dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE; 994 dev_info->recv_sections = NETVSC_DEFAULT_RX; 995 dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE; 996 } 997 998 return dev_info; 999 } 1000 1001 /* Free struct netvsc_device_info */ 1002 static void netvsc_devinfo_put(struct netvsc_device_info *dev_info) 1003 { 1004 if (dev_info->bprog) { 1005 ASSERT_RTNL(); 1006 bpf_prog_put(dev_info->bprog); 1007 } 1008 1009 kfree(dev_info); 1010 } 1011 1012 static int netvsc_detach(struct net_device *ndev, 1013 struct netvsc_device *nvdev) 1014 { 1015 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1016 struct hv_device *hdev = ndev_ctx->device_ctx; 1017 int ret; 1018 1019 /* Don't try continuing to try and setup sub channels */ 1020 if (cancel_work_sync(&nvdev->subchan_work)) 1021 nvdev->num_chn = 1; 1022 1023 netvsc_xdp_set(ndev, NULL, NULL, nvdev); 1024 1025 /* If device was up (receiving) then shutdown */ 1026 if (netif_running(ndev)) { 1027 netvsc_tx_disable(nvdev, ndev); 1028 1029 ret = rndis_filter_close(nvdev); 1030 if (ret) { 1031 netdev_err(ndev, 1032 "unable to close device (ret %d).\n", ret); 1033 return ret; 1034 } 1035 1036 ret = netvsc_wait_until_empty(nvdev); 1037 if (ret) { 1038 netdev_err(ndev, 1039 "Ring buffer not empty after closing rndis\n"); 1040 return ret; 1041 } 1042 } 1043 1044 netif_device_detach(ndev); 1045 1046 rndis_filter_device_remove(hdev, nvdev); 1047 1048 return 0; 1049 } 1050 1051 static int netvsc_attach(struct net_device *ndev, 1052 struct netvsc_device_info *dev_info) 1053 { 1054 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1055 struct hv_device *hdev = ndev_ctx->device_ctx; 1056 struct netvsc_device *nvdev; 1057 struct rndis_device *rdev; 1058 struct bpf_prog *prog; 1059 int ret = 0; 1060 1061 nvdev = rndis_filter_device_add(hdev, dev_info); 1062 if (IS_ERR(nvdev)) 1063 return PTR_ERR(nvdev); 1064 1065 if (nvdev->num_chn > 1) { 1066 ret = rndis_set_subchannel(ndev, nvdev, dev_info); 1067 1068 /* if unavailable, just proceed with one queue */ 1069 if (ret) { 1070 nvdev->max_chn = 1; 1071 nvdev->num_chn = 1; 1072 } 1073 } 1074 1075 prog = dev_info->bprog; 1076 if (prog) { 1077 bpf_prog_inc(prog); 1078 ret = netvsc_xdp_set(ndev, prog, NULL, nvdev); 1079 if (ret) { 1080 bpf_prog_put(prog); 1081 goto err1; 1082 } 1083 } 1084 1085 /* In any case device is now ready */ 1086 nvdev->tx_disable = false; 1087 netif_device_attach(ndev); 1088 1089 /* Note: enable and attach happen when sub-channels setup */ 1090 netif_carrier_off(ndev); 1091 1092 if (netif_running(ndev)) { 1093 ret = rndis_filter_open(nvdev); 1094 if (ret) 1095 goto err2; 1096 1097 rdev = nvdev->extension; 1098 if (!rdev->link_state) 1099 netif_carrier_on(ndev); 1100 } 1101 1102 return 0; 1103 1104 err2: 1105 netif_device_detach(ndev); 1106 1107 err1: 1108 rndis_filter_device_remove(hdev, nvdev); 1109 1110 return ret; 1111 } 1112 1113 static int netvsc_set_channels(struct net_device *net, 1114 struct ethtool_channels *channels) 1115 { 1116 struct net_device_context *net_device_ctx = netdev_priv(net); 1117 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 1118 unsigned int orig, count = channels->combined_count; 1119 struct netvsc_device_info *device_info; 1120 int ret; 1121 1122 /* We do not support separate count for rx, tx, or other */ 1123 if (count == 0 || 1124 channels->rx_count || channels->tx_count || channels->other_count) 1125 return -EINVAL; 1126 1127 if (!nvdev || nvdev->destroy) 1128 return -ENODEV; 1129 1130 if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) 1131 return -EINVAL; 1132 1133 if (count > nvdev->max_chn) 1134 return -EINVAL; 1135 1136 orig = nvdev->num_chn; 1137 1138 device_info = netvsc_devinfo_get(nvdev); 1139 1140 if (!device_info) 1141 return -ENOMEM; 1142 1143 device_info->num_chn = count; 1144 1145 ret = netvsc_detach(net, nvdev); 1146 if (ret) 1147 goto out; 1148 1149 ret = netvsc_attach(net, device_info); 1150 if (ret) { 1151 device_info->num_chn = orig; 1152 if (netvsc_attach(net, device_info)) 1153 netdev_err(net, "restoring channel setting failed\n"); 1154 } 1155 1156 out: 1157 netvsc_devinfo_put(device_info); 1158 return ret; 1159 } 1160 1161 static void netvsc_init_settings(struct net_device *dev) 1162 { 1163 struct net_device_context *ndc = netdev_priv(dev); 1164 1165 ndc->l4_hash = HV_DEFAULT_L4HASH; 1166 1167 ndc->speed = SPEED_UNKNOWN; 1168 ndc->duplex = DUPLEX_FULL; 1169 1170 dev->features = NETIF_F_LRO; 1171 } 1172 1173 static int netvsc_get_link_ksettings(struct net_device *dev, 1174 struct ethtool_link_ksettings *cmd) 1175 { 1176 struct net_device_context *ndc = netdev_priv(dev); 1177 struct net_device *vf_netdev; 1178 1179 vf_netdev = rtnl_dereference(ndc->vf_netdev); 1180 1181 if (vf_netdev) 1182 return __ethtool_get_link_ksettings(vf_netdev, cmd); 1183 1184 cmd->base.speed = ndc->speed; 1185 cmd->base.duplex = ndc->duplex; 1186 cmd->base.port = PORT_OTHER; 1187 1188 return 0; 1189 } 1190 1191 static int netvsc_set_link_ksettings(struct net_device *dev, 1192 const struct ethtool_link_ksettings *cmd) 1193 { 1194 struct net_device_context *ndc = netdev_priv(dev); 1195 struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev); 1196 1197 if (vf_netdev) { 1198 if (!vf_netdev->ethtool_ops->set_link_ksettings) 1199 return -EOPNOTSUPP; 1200 1201 return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev, 1202 cmd); 1203 } 1204 1205 return ethtool_virtdev_set_link_ksettings(dev, cmd, 1206 &ndc->speed, &ndc->duplex); 1207 } 1208 1209 static int netvsc_change_mtu(struct net_device *ndev, int mtu) 1210 { 1211 struct net_device_context *ndevctx = netdev_priv(ndev); 1212 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1213 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1214 int orig_mtu = ndev->mtu; 1215 struct netvsc_device_info *device_info; 1216 int ret = 0; 1217 1218 if (!nvdev || nvdev->destroy) 1219 return -ENODEV; 1220 1221 device_info = netvsc_devinfo_get(nvdev); 1222 1223 if (!device_info) 1224 return -ENOMEM; 1225 1226 /* Change MTU of underlying VF netdev first. */ 1227 if (vf_netdev) { 1228 ret = dev_set_mtu(vf_netdev, mtu); 1229 if (ret) 1230 goto out; 1231 } 1232 1233 ret = netvsc_detach(ndev, nvdev); 1234 if (ret) 1235 goto rollback_vf; 1236 1237 WRITE_ONCE(ndev->mtu, mtu); 1238 1239 ret = netvsc_attach(ndev, device_info); 1240 if (!ret) 1241 goto out; 1242 1243 /* Attempt rollback to original MTU */ 1244 WRITE_ONCE(ndev->mtu, orig_mtu); 1245 1246 if (netvsc_attach(ndev, device_info)) 1247 netdev_err(ndev, "restoring mtu failed\n"); 1248 rollback_vf: 1249 if (vf_netdev) 1250 dev_set_mtu(vf_netdev, orig_mtu); 1251 1252 out: 1253 netvsc_devinfo_put(device_info); 1254 return ret; 1255 } 1256 1257 static void netvsc_get_vf_stats(struct net_device *net, 1258 struct netvsc_vf_pcpu_stats *tot) 1259 { 1260 struct net_device_context *ndev_ctx = netdev_priv(net); 1261 int i; 1262 1263 memset(tot, 0, sizeof(*tot)); 1264 1265 for_each_possible_cpu(i) { 1266 const struct netvsc_vf_pcpu_stats *stats 1267 = per_cpu_ptr(ndev_ctx->vf_stats, i); 1268 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1269 unsigned int start; 1270 1271 do { 1272 start = u64_stats_fetch_begin(&stats->syncp); 1273 rx_packets = stats->rx_packets; 1274 tx_packets = stats->tx_packets; 1275 rx_bytes = stats->rx_bytes; 1276 tx_bytes = stats->tx_bytes; 1277 } while (u64_stats_fetch_retry(&stats->syncp, start)); 1278 1279 tot->rx_packets += rx_packets; 1280 tot->tx_packets += tx_packets; 1281 tot->rx_bytes += rx_bytes; 1282 tot->tx_bytes += tx_bytes; 1283 tot->tx_dropped += stats->tx_dropped; 1284 } 1285 } 1286 1287 static void netvsc_get_pcpu_stats(struct net_device *net, 1288 struct netvsc_ethtool_pcpu_stats *pcpu_tot) 1289 { 1290 struct net_device_context *ndev_ctx = netdev_priv(net); 1291 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); 1292 int i; 1293 1294 /* fetch percpu stats of vf */ 1295 for_each_possible_cpu(i) { 1296 const struct netvsc_vf_pcpu_stats *stats = 1297 per_cpu_ptr(ndev_ctx->vf_stats, i); 1298 struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i]; 1299 unsigned int start; 1300 1301 do { 1302 start = u64_stats_fetch_begin(&stats->syncp); 1303 this_tot->vf_rx_packets = stats->rx_packets; 1304 this_tot->vf_tx_packets = stats->tx_packets; 1305 this_tot->vf_rx_bytes = stats->rx_bytes; 1306 this_tot->vf_tx_bytes = stats->tx_bytes; 1307 } while (u64_stats_fetch_retry(&stats->syncp, start)); 1308 this_tot->rx_packets = this_tot->vf_rx_packets; 1309 this_tot->tx_packets = this_tot->vf_tx_packets; 1310 this_tot->rx_bytes = this_tot->vf_rx_bytes; 1311 this_tot->tx_bytes = this_tot->vf_tx_bytes; 1312 } 1313 1314 /* fetch percpu stats of netvsc */ 1315 for (i = 0; i < nvdev->num_chn; i++) { 1316 const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; 1317 const struct netvsc_stats_tx *tx_stats; 1318 const struct netvsc_stats_rx *rx_stats; 1319 struct netvsc_ethtool_pcpu_stats *this_tot = 1320 &pcpu_tot[nvchan->channel->target_cpu]; 1321 u64 packets, bytes; 1322 unsigned int start; 1323 1324 tx_stats = &nvchan->tx_stats; 1325 do { 1326 start = u64_stats_fetch_begin(&tx_stats->syncp); 1327 packets = tx_stats->packets; 1328 bytes = tx_stats->bytes; 1329 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); 1330 1331 this_tot->tx_bytes += bytes; 1332 this_tot->tx_packets += packets; 1333 1334 rx_stats = &nvchan->rx_stats; 1335 do { 1336 start = u64_stats_fetch_begin(&rx_stats->syncp); 1337 packets = rx_stats->packets; 1338 bytes = rx_stats->bytes; 1339 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); 1340 1341 this_tot->rx_bytes += bytes; 1342 this_tot->rx_packets += packets; 1343 } 1344 } 1345 1346 static void netvsc_get_stats64(struct net_device *net, 1347 struct rtnl_link_stats64 *t) 1348 { 1349 struct net_device_context *ndev_ctx = netdev_priv(net); 1350 struct netvsc_device *nvdev; 1351 struct netvsc_vf_pcpu_stats vf_tot; 1352 int i; 1353 1354 rcu_read_lock(); 1355 1356 nvdev = rcu_dereference(ndev_ctx->nvdev); 1357 if (!nvdev) 1358 goto out; 1359 1360 netdev_stats_to_stats64(t, &net->stats); 1361 1362 netvsc_get_vf_stats(net, &vf_tot); 1363 t->rx_packets += vf_tot.rx_packets; 1364 t->tx_packets += vf_tot.tx_packets; 1365 t->rx_bytes += vf_tot.rx_bytes; 1366 t->tx_bytes += vf_tot.tx_bytes; 1367 t->tx_dropped += vf_tot.tx_dropped; 1368 1369 for (i = 0; i < nvdev->num_chn; i++) { 1370 const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; 1371 const struct netvsc_stats_tx *tx_stats; 1372 const struct netvsc_stats_rx *rx_stats; 1373 u64 packets, bytes, multicast; 1374 unsigned int start; 1375 1376 tx_stats = &nvchan->tx_stats; 1377 do { 1378 start = u64_stats_fetch_begin(&tx_stats->syncp); 1379 packets = tx_stats->packets; 1380 bytes = tx_stats->bytes; 1381 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); 1382 1383 t->tx_bytes += bytes; 1384 t->tx_packets += packets; 1385 1386 rx_stats = &nvchan->rx_stats; 1387 do { 1388 start = u64_stats_fetch_begin(&rx_stats->syncp); 1389 packets = rx_stats->packets; 1390 bytes = rx_stats->bytes; 1391 multicast = rx_stats->multicast + rx_stats->broadcast; 1392 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); 1393 1394 t->rx_bytes += bytes; 1395 t->rx_packets += packets; 1396 t->multicast += multicast; 1397 } 1398 out: 1399 rcu_read_unlock(); 1400 } 1401 1402 static int netvsc_set_mac_addr(struct net_device *ndev, void *p) 1403 { 1404 struct net_device_context *ndc = netdev_priv(ndev); 1405 struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev); 1406 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1407 struct sockaddr *addr = p; 1408 int err; 1409 1410 err = eth_prepare_mac_addr_change(ndev, p); 1411 if (err) 1412 return err; 1413 1414 if (!nvdev) 1415 return -ENODEV; 1416 1417 if (vf_netdev) { 1418 err = dev_set_mac_address(vf_netdev, addr, NULL); 1419 if (err) 1420 return err; 1421 } 1422 1423 err = rndis_filter_set_device_mac(nvdev, addr->sa_data); 1424 if (!err) { 1425 eth_commit_mac_addr_change(ndev, p); 1426 } else if (vf_netdev) { 1427 /* rollback change on VF */ 1428 memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN); 1429 dev_set_mac_address(vf_netdev, addr, NULL); 1430 } 1431 1432 return err; 1433 } 1434 1435 static const struct { 1436 char name[ETH_GSTRING_LEN]; 1437 u16 offset; 1438 } netvsc_stats[] = { 1439 { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) }, 1440 { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) }, 1441 { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, 1442 { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, 1443 { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, 1444 { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) }, 1445 { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) }, 1446 { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) }, 1447 { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) }, 1448 { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) }, 1449 { "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) }, 1450 }, pcpu_stats[] = { 1451 { "cpu%u_rx_packets", 1452 offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) }, 1453 { "cpu%u_rx_bytes", 1454 offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) }, 1455 { "cpu%u_tx_packets", 1456 offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) }, 1457 { "cpu%u_tx_bytes", 1458 offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) }, 1459 { "cpu%u_vf_rx_packets", 1460 offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) }, 1461 { "cpu%u_vf_rx_bytes", 1462 offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) }, 1463 { "cpu%u_vf_tx_packets", 1464 offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) }, 1465 { "cpu%u_vf_tx_bytes", 1466 offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) }, 1467 }, vf_stats[] = { 1468 { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, 1469 { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, 1470 { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) }, 1471 { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) }, 1472 { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) }, 1473 }; 1474 1475 #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) 1476 #define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats) 1477 1478 /* statistics per queue (rx/tx packets/bytes) */ 1479 #define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats)) 1480 1481 /* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */ 1482 #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8) 1483 1484 static int netvsc_get_sset_count(struct net_device *dev, int string_set) 1485 { 1486 struct net_device_context *ndc = netdev_priv(dev); 1487 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1488 1489 if (!nvdev) 1490 return -ENODEV; 1491 1492 switch (string_set) { 1493 case ETH_SS_STATS: 1494 return NETVSC_GLOBAL_STATS_LEN 1495 + NETVSC_VF_STATS_LEN 1496 + NETVSC_QUEUE_STATS_LEN(nvdev) 1497 + NETVSC_PCPU_STATS_LEN; 1498 default: 1499 return -EINVAL; 1500 } 1501 } 1502 1503 static void netvsc_get_ethtool_stats(struct net_device *dev, 1504 struct ethtool_stats *stats, u64 *data) 1505 { 1506 struct net_device_context *ndc = netdev_priv(dev); 1507 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1508 const void *nds = &ndc->eth_stats; 1509 const struct netvsc_stats_tx *tx_stats; 1510 const struct netvsc_stats_rx *rx_stats; 1511 struct netvsc_vf_pcpu_stats sum; 1512 struct netvsc_ethtool_pcpu_stats *pcpu_sum; 1513 unsigned int start; 1514 u64 packets, bytes; 1515 u64 xdp_drop; 1516 u64 xdp_redirect; 1517 u64 xdp_tx; 1518 u64 xdp_xmit; 1519 int i, j, cpu; 1520 1521 if (!nvdev) 1522 return; 1523 1524 for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) 1525 data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); 1526 1527 netvsc_get_vf_stats(dev, &sum); 1528 for (j = 0; j < NETVSC_VF_STATS_LEN; j++) 1529 data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset); 1530 1531 for (j = 0; j < nvdev->num_chn; j++) { 1532 tx_stats = &nvdev->chan_table[j].tx_stats; 1533 1534 do { 1535 start = u64_stats_fetch_begin(&tx_stats->syncp); 1536 packets = tx_stats->packets; 1537 bytes = tx_stats->bytes; 1538 xdp_xmit = tx_stats->xdp_xmit; 1539 } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); 1540 data[i++] = packets; 1541 data[i++] = bytes; 1542 data[i++] = xdp_xmit; 1543 1544 rx_stats = &nvdev->chan_table[j].rx_stats; 1545 do { 1546 start = u64_stats_fetch_begin(&rx_stats->syncp); 1547 packets = rx_stats->packets; 1548 bytes = rx_stats->bytes; 1549 xdp_drop = rx_stats->xdp_drop; 1550 xdp_redirect = rx_stats->xdp_redirect; 1551 xdp_tx = rx_stats->xdp_tx; 1552 } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); 1553 data[i++] = packets; 1554 data[i++] = bytes; 1555 data[i++] = xdp_drop; 1556 data[i++] = xdp_redirect; 1557 data[i++] = xdp_tx; 1558 } 1559 1560 pcpu_sum = kvmalloc_array(num_possible_cpus(), 1561 sizeof(struct netvsc_ethtool_pcpu_stats), 1562 GFP_KERNEL); 1563 if (!pcpu_sum) 1564 return; 1565 1566 netvsc_get_pcpu_stats(dev, pcpu_sum); 1567 for_each_present_cpu(cpu) { 1568 struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; 1569 1570 for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++) 1571 data[i++] = *(u64 *)((void *)this_sum 1572 + pcpu_stats[j].offset); 1573 } 1574 kvfree(pcpu_sum); 1575 } 1576 1577 static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) 1578 { 1579 struct net_device_context *ndc = netdev_priv(dev); 1580 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1581 u8 *p = data; 1582 int i, cpu; 1583 1584 if (!nvdev) 1585 return; 1586 1587 switch (stringset) { 1588 case ETH_SS_STATS: 1589 for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) 1590 ethtool_puts(&p, netvsc_stats[i].name); 1591 1592 for (i = 0; i < ARRAY_SIZE(vf_stats); i++) 1593 ethtool_puts(&p, vf_stats[i].name); 1594 1595 for (i = 0; i < nvdev->num_chn; i++) { 1596 ethtool_sprintf(&p, "tx_queue_%u_packets", i); 1597 ethtool_sprintf(&p, "tx_queue_%u_bytes", i); 1598 ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i); 1599 ethtool_sprintf(&p, "rx_queue_%u_packets", i); 1600 ethtool_sprintf(&p, "rx_queue_%u_bytes", i); 1601 ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i); 1602 ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i); 1603 ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i); 1604 } 1605 1606 for_each_present_cpu(cpu) { 1607 for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) 1608 ethtool_sprintf(&p, pcpu_stats[i].name, cpu); 1609 } 1610 1611 break; 1612 } 1613 } 1614 1615 static int 1616 netvsc_get_rss_hash_opts(struct net_device_context *ndc, 1617 struct ethtool_rxnfc *info) 1618 { 1619 const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3; 1620 1621 info->data = RXH_IP_SRC | RXH_IP_DST; 1622 1623 switch (info->flow_type) { 1624 case TCP_V4_FLOW: 1625 if (ndc->l4_hash & HV_TCP4_L4HASH) 1626 info->data |= l4_flag; 1627 1628 break; 1629 1630 case TCP_V6_FLOW: 1631 if (ndc->l4_hash & HV_TCP6_L4HASH) 1632 info->data |= l4_flag; 1633 1634 break; 1635 1636 case UDP_V4_FLOW: 1637 if (ndc->l4_hash & HV_UDP4_L4HASH) 1638 info->data |= l4_flag; 1639 1640 break; 1641 1642 case UDP_V6_FLOW: 1643 if (ndc->l4_hash & HV_UDP6_L4HASH) 1644 info->data |= l4_flag; 1645 1646 break; 1647 1648 case IPV4_FLOW: 1649 case IPV6_FLOW: 1650 break; 1651 default: 1652 info->data = 0; 1653 break; 1654 } 1655 1656 return 0; 1657 } 1658 1659 static int 1660 netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 1661 u32 *rules) 1662 { 1663 struct net_device_context *ndc = netdev_priv(dev); 1664 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1665 1666 if (!nvdev) 1667 return -ENODEV; 1668 1669 switch (info->cmd) { 1670 case ETHTOOL_GRXRINGS: 1671 info->data = nvdev->num_chn; 1672 return 0; 1673 1674 case ETHTOOL_GRXFH: 1675 return netvsc_get_rss_hash_opts(ndc, info); 1676 } 1677 return -EOPNOTSUPP; 1678 } 1679 1680 static int netvsc_set_rss_hash_opts(struct net_device_context *ndc, 1681 struct ethtool_rxnfc *info) 1682 { 1683 if (info->data == (RXH_IP_SRC | RXH_IP_DST | 1684 RXH_L4_B_0_1 | RXH_L4_B_2_3)) { 1685 switch (info->flow_type) { 1686 case TCP_V4_FLOW: 1687 ndc->l4_hash |= HV_TCP4_L4HASH; 1688 break; 1689 1690 case TCP_V6_FLOW: 1691 ndc->l4_hash |= HV_TCP6_L4HASH; 1692 break; 1693 1694 case UDP_V4_FLOW: 1695 ndc->l4_hash |= HV_UDP4_L4HASH; 1696 break; 1697 1698 case UDP_V6_FLOW: 1699 ndc->l4_hash |= HV_UDP6_L4HASH; 1700 break; 1701 1702 default: 1703 return -EOPNOTSUPP; 1704 } 1705 1706 return 0; 1707 } 1708 1709 if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { 1710 switch (info->flow_type) { 1711 case TCP_V4_FLOW: 1712 ndc->l4_hash &= ~HV_TCP4_L4HASH; 1713 break; 1714 1715 case TCP_V6_FLOW: 1716 ndc->l4_hash &= ~HV_TCP6_L4HASH; 1717 break; 1718 1719 case UDP_V4_FLOW: 1720 ndc->l4_hash &= ~HV_UDP4_L4HASH; 1721 break; 1722 1723 case UDP_V6_FLOW: 1724 ndc->l4_hash &= ~HV_UDP6_L4HASH; 1725 break; 1726 1727 default: 1728 return -EOPNOTSUPP; 1729 } 1730 1731 return 0; 1732 } 1733 1734 return -EOPNOTSUPP; 1735 } 1736 1737 static int 1738 netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info) 1739 { 1740 struct net_device_context *ndc = netdev_priv(ndev); 1741 1742 if (info->cmd == ETHTOOL_SRXFH) 1743 return netvsc_set_rss_hash_opts(ndc, info); 1744 1745 return -EOPNOTSUPP; 1746 } 1747 1748 static u32 netvsc_get_rxfh_key_size(struct net_device *dev) 1749 { 1750 return NETVSC_HASH_KEYLEN; 1751 } 1752 1753 static u32 netvsc_rss_indir_size(struct net_device *dev) 1754 { 1755 struct net_device_context *ndc = netdev_priv(dev); 1756 1757 return ndc->rx_table_sz; 1758 } 1759 1760 static int netvsc_get_rxfh(struct net_device *dev, 1761 struct ethtool_rxfh_param *rxfh) 1762 { 1763 struct net_device_context *ndc = netdev_priv(dev); 1764 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); 1765 struct rndis_device *rndis_dev; 1766 int i; 1767 1768 if (!ndev) 1769 return -ENODEV; 1770 1771 rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ 1772 1773 rndis_dev = ndev->extension; 1774 if (rxfh->indir) { 1775 for (i = 0; i < ndc->rx_table_sz; i++) 1776 rxfh->indir[i] = ndc->rx_table[i]; 1777 } 1778 1779 if (rxfh->key) 1780 memcpy(rxfh->key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN); 1781 1782 return 0; 1783 } 1784 1785 static int netvsc_set_rxfh(struct net_device *dev, 1786 struct ethtool_rxfh_param *rxfh, 1787 struct netlink_ext_ack *extack) 1788 { 1789 struct net_device_context *ndc = netdev_priv(dev); 1790 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); 1791 struct rndis_device *rndis_dev; 1792 u8 *key = rxfh->key; 1793 int i; 1794 1795 if (!ndev) 1796 return -ENODEV; 1797 1798 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 1799 rxfh->hfunc != ETH_RSS_HASH_TOP) 1800 return -EOPNOTSUPP; 1801 1802 rndis_dev = ndev->extension; 1803 if (rxfh->indir) { 1804 for (i = 0; i < ndc->rx_table_sz; i++) 1805 if (rxfh->indir[i] >= ndev->num_chn) 1806 return -EINVAL; 1807 1808 for (i = 0; i < ndc->rx_table_sz; i++) 1809 ndc->rx_table[i] = rxfh->indir[i]; 1810 } 1811 1812 if (!key) { 1813 if (!rxfh->indir) 1814 return 0; 1815 1816 key = rndis_dev->rss_key; 1817 } 1818 1819 return rndis_filter_set_rss_param(rndis_dev, key); 1820 } 1821 1822 /* Hyper-V RNDIS protocol does not have ring in the HW sense. 1823 * It does have pre-allocated receive area which is divided into sections. 1824 */ 1825 static void __netvsc_get_ringparam(struct netvsc_device *nvdev, 1826 struct ethtool_ringparam *ring) 1827 { 1828 u32 max_buf_size; 1829 1830 ring->rx_pending = nvdev->recv_section_cnt; 1831 ring->tx_pending = nvdev->send_section_cnt; 1832 1833 if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2) 1834 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; 1835 else 1836 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; 1837 1838 ring->rx_max_pending = max_buf_size / nvdev->recv_section_size; 1839 ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE 1840 / nvdev->send_section_size; 1841 } 1842 1843 static void netvsc_get_ringparam(struct net_device *ndev, 1844 struct ethtool_ringparam *ring, 1845 struct kernel_ethtool_ringparam *kernel_ring, 1846 struct netlink_ext_ack *extack) 1847 { 1848 struct net_device_context *ndevctx = netdev_priv(ndev); 1849 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1850 1851 if (!nvdev) 1852 return; 1853 1854 __netvsc_get_ringparam(nvdev, ring); 1855 } 1856 1857 static int netvsc_set_ringparam(struct net_device *ndev, 1858 struct ethtool_ringparam *ring, 1859 struct kernel_ethtool_ringparam *kernel_ring, 1860 struct netlink_ext_ack *extack) 1861 { 1862 struct net_device_context *ndevctx = netdev_priv(ndev); 1863 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1864 struct netvsc_device_info *device_info; 1865 struct ethtool_ringparam orig; 1866 u32 new_tx, new_rx; 1867 int ret = 0; 1868 1869 if (!nvdev || nvdev->destroy) 1870 return -ENODEV; 1871 1872 memset(&orig, 0, sizeof(orig)); 1873 __netvsc_get_ringparam(nvdev, &orig); 1874 1875 new_tx = clamp_t(u32, ring->tx_pending, 1876 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending); 1877 new_rx = clamp_t(u32, ring->rx_pending, 1878 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending); 1879 1880 if (new_tx == orig.tx_pending && 1881 new_rx == orig.rx_pending) 1882 return 0; /* no change */ 1883 1884 device_info = netvsc_devinfo_get(nvdev); 1885 1886 if (!device_info) 1887 return -ENOMEM; 1888 1889 device_info->send_sections = new_tx; 1890 device_info->recv_sections = new_rx; 1891 1892 ret = netvsc_detach(ndev, nvdev); 1893 if (ret) 1894 goto out; 1895 1896 ret = netvsc_attach(ndev, device_info); 1897 if (ret) { 1898 device_info->send_sections = orig.tx_pending; 1899 device_info->recv_sections = orig.rx_pending; 1900 1901 if (netvsc_attach(ndev, device_info)) 1902 netdev_err(ndev, "restoring ringparam failed"); 1903 } 1904 1905 out: 1906 netvsc_devinfo_put(device_info); 1907 return ret; 1908 } 1909 1910 static netdev_features_t netvsc_fix_features(struct net_device *ndev, 1911 netdev_features_t features) 1912 { 1913 struct net_device_context *ndevctx = netdev_priv(ndev); 1914 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1915 1916 if (!nvdev || nvdev->destroy) 1917 return features; 1918 1919 if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) { 1920 features ^= NETIF_F_LRO; 1921 netdev_info(ndev, "Skip LRO - unsupported with XDP\n"); 1922 } 1923 1924 return features; 1925 } 1926 1927 static int netvsc_set_features(struct net_device *ndev, 1928 netdev_features_t features) 1929 { 1930 netdev_features_t change = features ^ ndev->features; 1931 struct net_device_context *ndevctx = netdev_priv(ndev); 1932 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1933 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1934 struct ndis_offload_params offloads; 1935 int ret = 0; 1936 1937 if (!nvdev || nvdev->destroy) 1938 return -ENODEV; 1939 1940 if (!(change & NETIF_F_LRO)) 1941 goto syncvf; 1942 1943 memset(&offloads, 0, sizeof(struct ndis_offload_params)); 1944 1945 if (features & NETIF_F_LRO) { 1946 offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; 1947 offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED; 1948 } else { 1949 offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; 1950 offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED; 1951 } 1952 1953 ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads); 1954 1955 if (ret) { 1956 features ^= NETIF_F_LRO; 1957 ndev->features = features; 1958 } 1959 1960 syncvf: 1961 if (!vf_netdev) 1962 return ret; 1963 1964 vf_netdev->wanted_features = features; 1965 netdev_update_features(vf_netdev); 1966 1967 return ret; 1968 } 1969 1970 static int netvsc_get_regs_len(struct net_device *netdev) 1971 { 1972 return VRSS_SEND_TAB_SIZE * sizeof(u32); 1973 } 1974 1975 static void netvsc_get_regs(struct net_device *netdev, 1976 struct ethtool_regs *regs, void *p) 1977 { 1978 struct net_device_context *ndc = netdev_priv(netdev); 1979 u32 *regs_buff = p; 1980 1981 /* increase the version, if buffer format is changed. */ 1982 regs->version = 1; 1983 1984 memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32)); 1985 } 1986 1987 static u32 netvsc_get_msglevel(struct net_device *ndev) 1988 { 1989 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1990 1991 return ndev_ctx->msg_enable; 1992 } 1993 1994 static void netvsc_set_msglevel(struct net_device *ndev, u32 val) 1995 { 1996 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1997 1998 ndev_ctx->msg_enable = val; 1999 } 2000 2001 static const struct ethtool_ops ethtool_ops = { 2002 .get_drvinfo = netvsc_get_drvinfo, 2003 .get_regs_len = netvsc_get_regs_len, 2004 .get_regs = netvsc_get_regs, 2005 .get_msglevel = netvsc_get_msglevel, 2006 .set_msglevel = netvsc_set_msglevel, 2007 .get_link = ethtool_op_get_link, 2008 .get_ethtool_stats = netvsc_get_ethtool_stats, 2009 .get_sset_count = netvsc_get_sset_count, 2010 .get_strings = netvsc_get_strings, 2011 .get_channels = netvsc_get_channels, 2012 .set_channels = netvsc_set_channels, 2013 .get_ts_info = ethtool_op_get_ts_info, 2014 .get_rxnfc = netvsc_get_rxnfc, 2015 .set_rxnfc = netvsc_set_rxnfc, 2016 .get_rxfh_key_size = netvsc_get_rxfh_key_size, 2017 .get_rxfh_indir_size = netvsc_rss_indir_size, 2018 .get_rxfh = netvsc_get_rxfh, 2019 .set_rxfh = netvsc_set_rxfh, 2020 .get_link_ksettings = netvsc_get_link_ksettings, 2021 .set_link_ksettings = netvsc_set_link_ksettings, 2022 .get_ringparam = netvsc_get_ringparam, 2023 .set_ringparam = netvsc_set_ringparam, 2024 }; 2025 2026 static const struct net_device_ops device_ops = { 2027 .ndo_open = netvsc_open, 2028 .ndo_stop = netvsc_close, 2029 .ndo_start_xmit = netvsc_start_xmit, 2030 .ndo_change_rx_flags = netvsc_change_rx_flags, 2031 .ndo_set_rx_mode = netvsc_set_rx_mode, 2032 .ndo_fix_features = netvsc_fix_features, 2033 .ndo_set_features = netvsc_set_features, 2034 .ndo_change_mtu = netvsc_change_mtu, 2035 .ndo_validate_addr = eth_validate_addr, 2036 .ndo_set_mac_address = netvsc_set_mac_addr, 2037 .ndo_select_queue = netvsc_select_queue, 2038 .ndo_get_stats64 = netvsc_get_stats64, 2039 .ndo_bpf = netvsc_bpf, 2040 .ndo_xdp_xmit = netvsc_ndoxdp_xmit, 2041 }; 2042 2043 /* 2044 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link 2045 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is 2046 * present send GARP packet to network peers with netif_notify_peers(). 2047 */ 2048 static void netvsc_link_change(struct work_struct *w) 2049 { 2050 struct net_device_context *ndev_ctx = 2051 container_of(w, struct net_device_context, dwork.work); 2052 struct hv_device *device_obj = ndev_ctx->device_ctx; 2053 struct net_device *net = hv_get_drvdata(device_obj); 2054 unsigned long flags, next_reconfig, delay; 2055 struct netvsc_reconfig *event = NULL; 2056 struct netvsc_device *net_device; 2057 struct rndis_device *rdev; 2058 bool reschedule = false; 2059 2060 /* if changes are happening, comeback later */ 2061 if (!rtnl_trylock()) { 2062 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); 2063 return; 2064 } 2065 2066 net_device = rtnl_dereference(ndev_ctx->nvdev); 2067 if (!net_device) 2068 goto out_unlock; 2069 2070 rdev = net_device->extension; 2071 2072 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT; 2073 if (time_is_after_jiffies(next_reconfig)) { 2074 /* link_watch only sends one notification with current state 2075 * per second, avoid doing reconfig more frequently. Handle 2076 * wrap around. 2077 */ 2078 delay = next_reconfig - jiffies; 2079 delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT; 2080 schedule_delayed_work(&ndev_ctx->dwork, delay); 2081 goto out_unlock; 2082 } 2083 ndev_ctx->last_reconfig = jiffies; 2084 2085 spin_lock_irqsave(&ndev_ctx->lock, flags); 2086 if (!list_empty(&ndev_ctx->reconfig_events)) { 2087 event = list_first_entry(&ndev_ctx->reconfig_events, 2088 struct netvsc_reconfig, list); 2089 list_del(&event->list); 2090 reschedule = !list_empty(&ndev_ctx->reconfig_events); 2091 } 2092 spin_unlock_irqrestore(&ndev_ctx->lock, flags); 2093 2094 if (!event) 2095 goto out_unlock; 2096 2097 switch (event->event) { 2098 /* Only the following events are possible due to the check in 2099 * netvsc_linkstatus_callback() 2100 */ 2101 case RNDIS_STATUS_MEDIA_CONNECT: 2102 if (rdev->link_state) { 2103 rdev->link_state = false; 2104 netif_carrier_on(net); 2105 netvsc_tx_enable(net_device, net); 2106 } else { 2107 __netdev_notify_peers(net); 2108 } 2109 kfree(event); 2110 break; 2111 case RNDIS_STATUS_MEDIA_DISCONNECT: 2112 if (!rdev->link_state) { 2113 rdev->link_state = true; 2114 netif_carrier_off(net); 2115 netvsc_tx_disable(net_device, net); 2116 } 2117 kfree(event); 2118 break; 2119 case RNDIS_STATUS_NETWORK_CHANGE: 2120 /* Only makes sense if carrier is present */ 2121 if (!rdev->link_state) { 2122 rdev->link_state = true; 2123 netif_carrier_off(net); 2124 netvsc_tx_disable(net_device, net); 2125 event->event = RNDIS_STATUS_MEDIA_CONNECT; 2126 spin_lock_irqsave(&ndev_ctx->lock, flags); 2127 list_add(&event->list, &ndev_ctx->reconfig_events); 2128 spin_unlock_irqrestore(&ndev_ctx->lock, flags); 2129 reschedule = true; 2130 } 2131 break; 2132 } 2133 2134 rtnl_unlock(); 2135 2136 /* link_watch only sends one notification with current state per 2137 * second, handle next reconfig event in 2 seconds. 2138 */ 2139 if (reschedule) 2140 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); 2141 2142 return; 2143 2144 out_unlock: 2145 rtnl_unlock(); 2146 } 2147 2148 static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) 2149 { 2150 struct net_device_context *net_device_ctx; 2151 struct net_device *dev; 2152 2153 dev = netdev_master_upper_dev_get(vf_netdev); 2154 if (!dev || dev->netdev_ops != &device_ops) 2155 return NULL; /* not a netvsc device */ 2156 2157 net_device_ctx = netdev_priv(dev); 2158 if (!rtnl_dereference(net_device_ctx->nvdev)) 2159 return NULL; /* device is removed */ 2160 2161 return dev; 2162 } 2163 2164 /* Called when VF is injecting data into network stack. 2165 * Change the associated network device from VF to netvsc. 2166 * note: already called with rcu_read_lock 2167 */ 2168 static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) 2169 { 2170 struct sk_buff *skb = *pskb; 2171 struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data); 2172 struct net_device_context *ndev_ctx = netdev_priv(ndev); 2173 struct netvsc_vf_pcpu_stats *pcpu_stats 2174 = this_cpu_ptr(ndev_ctx->vf_stats); 2175 2176 skb = skb_share_check(skb, GFP_ATOMIC); 2177 if (unlikely(!skb)) 2178 return RX_HANDLER_CONSUMED; 2179 2180 *pskb = skb; 2181 2182 skb->dev = ndev; 2183 2184 u64_stats_update_begin(&pcpu_stats->syncp); 2185 pcpu_stats->rx_packets++; 2186 pcpu_stats->rx_bytes += skb->len; 2187 u64_stats_update_end(&pcpu_stats->syncp); 2188 2189 return RX_HANDLER_ANOTHER; 2190 } 2191 2192 static int netvsc_vf_join(struct net_device *vf_netdev, 2193 struct net_device *ndev, int context) 2194 { 2195 struct net_device_context *ndev_ctx = netdev_priv(ndev); 2196 int ret; 2197 2198 ret = netdev_rx_handler_register(vf_netdev, 2199 netvsc_vf_handle_frame, ndev); 2200 if (ret != 0) { 2201 netdev_err(vf_netdev, 2202 "can not register netvsc VF receive handler (err = %d)\n", 2203 ret); 2204 goto rx_handler_failed; 2205 } 2206 2207 ret = netdev_master_upper_dev_link(vf_netdev, ndev, 2208 NULL, NULL, NULL); 2209 if (ret != 0) { 2210 netdev_err(vf_netdev, 2211 "can not set master device %s (err = %d)\n", 2212 ndev->name, ret); 2213 goto upper_link_failed; 2214 } 2215 2216 /* If this registration is called from probe context vf_takeover 2217 * is taken care of later in probe itself. 2218 */ 2219 if (context == VF_REG_IN_NOTIFIER) 2220 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); 2221 2222 call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); 2223 2224 netdev_info(vf_netdev, "joined to %s\n", ndev->name); 2225 return 0; 2226 2227 upper_link_failed: 2228 netdev_rx_handler_unregister(vf_netdev); 2229 rx_handler_failed: 2230 return ret; 2231 } 2232 2233 static void __netvsc_vf_setup(struct net_device *ndev, 2234 struct net_device *vf_netdev) 2235 { 2236 int ret; 2237 2238 /* Align MTU of VF with master */ 2239 ret = dev_set_mtu(vf_netdev, ndev->mtu); 2240 if (ret) 2241 netdev_warn(vf_netdev, 2242 "unable to change mtu to %u\n", ndev->mtu); 2243 2244 /* set multicast etc flags on VF */ 2245 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL); 2246 2247 /* sync address list from ndev to VF */ 2248 netif_addr_lock_bh(ndev); 2249 dev_uc_sync(vf_netdev, ndev); 2250 dev_mc_sync(vf_netdev, ndev); 2251 netif_addr_unlock_bh(ndev); 2252 2253 if (netif_running(ndev)) { 2254 ret = dev_open(vf_netdev, NULL); 2255 if (ret) 2256 netdev_warn(vf_netdev, 2257 "unable to open: %d\n", ret); 2258 } 2259 } 2260 2261 /* Setup VF as slave of the synthetic device. 2262 * Runs in workqueue to avoid recursion in netlink callbacks. 2263 */ 2264 static void netvsc_vf_setup(struct work_struct *w) 2265 { 2266 struct net_device_context *ndev_ctx 2267 = container_of(w, struct net_device_context, vf_takeover.work); 2268 struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); 2269 struct net_device *vf_netdev; 2270 2271 if (!rtnl_trylock()) { 2272 schedule_delayed_work(&ndev_ctx->vf_takeover, 0); 2273 return; 2274 } 2275 2276 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 2277 if (vf_netdev) 2278 __netvsc_vf_setup(ndev, vf_netdev); 2279 2280 rtnl_unlock(); 2281 } 2282 2283 /* Find netvsc by VF serial number. 2284 * The PCI hyperv controller records the serial number as the slot kobj name. 2285 */ 2286 static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) 2287 { 2288 struct device *parent = vf_netdev->dev.parent; 2289 struct net_device_context *ndev_ctx; 2290 struct net_device *ndev; 2291 struct pci_dev *pdev; 2292 u32 serial; 2293 2294 if (!parent || !dev_is_pci(parent)) 2295 return NULL; /* not a PCI device */ 2296 2297 pdev = to_pci_dev(parent); 2298 if (!pdev->slot) { 2299 netdev_notice(vf_netdev, "no PCI slot information\n"); 2300 return NULL; 2301 } 2302 2303 if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) { 2304 netdev_notice(vf_netdev, "Invalid vf serial:%s\n", 2305 pci_slot_name(pdev->slot)); 2306 return NULL; 2307 } 2308 2309 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { 2310 if (!ndev_ctx->vf_alloc) 2311 continue; 2312 2313 if (ndev_ctx->vf_serial != serial) 2314 continue; 2315 2316 ndev = hv_get_drvdata(ndev_ctx->device_ctx); 2317 if (ndev->addr_len != vf_netdev->addr_len || 2318 memcmp(ndev->perm_addr, vf_netdev->perm_addr, 2319 ndev->addr_len) != 0) 2320 continue; 2321 2322 return ndev; 2323 2324 } 2325 2326 /* Fallback path to check synthetic vf with help of mac addr. 2327 * Because this function can be called before vf_netdev is 2328 * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied 2329 * from dev_addr, also try to match to its dev_addr. 2330 * Note: On Hyper-V and Azure, it's not possible to set a MAC address 2331 * on a VF that matches to the MAC of a unrelated NETVSC device. 2332 */ 2333 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { 2334 ndev = hv_get_drvdata(ndev_ctx->device_ctx); 2335 if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) || 2336 ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr)) 2337 return ndev; 2338 } 2339 2340 netdev_notice(vf_netdev, 2341 "no netdev found for vf serial:%u\n", serial); 2342 return NULL; 2343 } 2344 2345 static int netvsc_prepare_bonding(struct net_device *vf_netdev) 2346 { 2347 struct net_device *ndev; 2348 2349 ndev = get_netvsc_byslot(vf_netdev); 2350 if (!ndev) 2351 return NOTIFY_DONE; 2352 2353 /* set slave flag before open to prevent IPv6 addrconf */ 2354 vf_netdev->flags |= IFF_SLAVE; 2355 return NOTIFY_DONE; 2356 } 2357 2358 static int netvsc_register_vf(struct net_device *vf_netdev, int context) 2359 { 2360 struct net_device_context *net_device_ctx; 2361 struct netvsc_device *netvsc_dev; 2362 struct bpf_prog *prog; 2363 struct net_device *ndev; 2364 int ret; 2365 2366 if (vf_netdev->addr_len != ETH_ALEN) 2367 return NOTIFY_DONE; 2368 2369 ndev = get_netvsc_byslot(vf_netdev); 2370 if (!ndev) 2371 return NOTIFY_DONE; 2372 2373 net_device_ctx = netdev_priv(ndev); 2374 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); 2375 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) 2376 return NOTIFY_DONE; 2377 2378 /* if synthetic interface is a different namespace, 2379 * then move the VF to that namespace; join will be 2380 * done again in that context. 2381 */ 2382 if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) { 2383 ret = dev_change_net_namespace(vf_netdev, 2384 dev_net(ndev), "eth%d"); 2385 if (ret) 2386 netdev_err(vf_netdev, 2387 "could not move to same namespace as %s: %d\n", 2388 ndev->name, ret); 2389 else 2390 netdev_info(vf_netdev, 2391 "VF moved to namespace with: %s\n", 2392 ndev->name); 2393 return NOTIFY_DONE; 2394 } 2395 2396 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); 2397 2398 if (netvsc_vf_join(vf_netdev, ndev, context) != 0) 2399 return NOTIFY_DONE; 2400 2401 dev_hold(vf_netdev); 2402 rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); 2403 2404 if (ndev->needed_headroom < vf_netdev->needed_headroom) 2405 ndev->needed_headroom = vf_netdev->needed_headroom; 2406 2407 vf_netdev->wanted_features = ndev->features; 2408 netdev_update_features(vf_netdev); 2409 2410 prog = netvsc_xdp_get(netvsc_dev); 2411 netvsc_vf_setxdp(vf_netdev, prog); 2412 2413 return NOTIFY_OK; 2414 } 2415 2416 /* Change the data path when VF UP/DOWN/CHANGE are detected. 2417 * 2418 * Typically a UP or DOWN event is followed by a CHANGE event, so 2419 * net_device_ctx->data_path_is_vf is used to cache the current data path 2420 * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate 2421 * message. 2422 * 2423 * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network 2424 * interface, there is only the CHANGE event and no UP or DOWN event. 2425 */ 2426 static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event) 2427 { 2428 struct net_device_context *net_device_ctx; 2429 struct netvsc_device *netvsc_dev; 2430 struct net_device *ndev; 2431 bool vf_is_up = false; 2432 int ret; 2433 2434 if (event != NETDEV_GOING_DOWN) 2435 vf_is_up = netif_running(vf_netdev); 2436 2437 ndev = get_netvsc_byref(vf_netdev); 2438 if (!ndev) 2439 return NOTIFY_DONE; 2440 2441 net_device_ctx = netdev_priv(ndev); 2442 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); 2443 if (!netvsc_dev) 2444 return NOTIFY_DONE; 2445 2446 if (net_device_ctx->data_path_is_vf == vf_is_up) 2447 return NOTIFY_OK; 2448 2449 if (vf_is_up && !net_device_ctx->vf_alloc) { 2450 netdev_info(ndev, "Waiting for the VF association from host\n"); 2451 wait_for_completion(&net_device_ctx->vf_add); 2452 } 2453 2454 ret = netvsc_switch_datapath(ndev, vf_is_up); 2455 2456 if (ret) { 2457 netdev_err(ndev, 2458 "Data path failed to switch %s VF: %s, err: %d\n", 2459 vf_is_up ? "to" : "from", vf_netdev->name, ret); 2460 return NOTIFY_DONE; 2461 } else { 2462 netdev_info(ndev, "Data path switched %s VF: %s\n", 2463 vf_is_up ? "to" : "from", vf_netdev->name); 2464 } 2465 2466 return NOTIFY_OK; 2467 } 2468 2469 static int netvsc_unregister_vf(struct net_device *vf_netdev) 2470 { 2471 struct net_device *ndev; 2472 struct net_device_context *net_device_ctx; 2473 2474 ndev = get_netvsc_byref(vf_netdev); 2475 if (!ndev) 2476 return NOTIFY_DONE; 2477 2478 net_device_ctx = netdev_priv(ndev); 2479 cancel_delayed_work_sync(&net_device_ctx->vf_takeover); 2480 2481 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); 2482 2483 netvsc_vf_setxdp(vf_netdev, NULL); 2484 2485 reinit_completion(&net_device_ctx->vf_add); 2486 netdev_rx_handler_unregister(vf_netdev); 2487 netdev_upper_dev_unlink(vf_netdev, ndev); 2488 RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); 2489 dev_put(vf_netdev); 2490 2491 ndev->needed_headroom = RNDIS_AND_PPI_SIZE; 2492 2493 return NOTIFY_OK; 2494 } 2495 2496 static int check_dev_is_matching_vf(struct net_device *event_ndev) 2497 { 2498 /* Skip NetVSC interfaces */ 2499 if (event_ndev->netdev_ops == &device_ops) 2500 return -ENODEV; 2501 2502 /* Avoid non-Ethernet type devices */ 2503 if (event_ndev->type != ARPHRD_ETHER) 2504 return -ENODEV; 2505 2506 /* Avoid Vlan dev with same MAC registering as VF */ 2507 if (is_vlan_dev(event_ndev)) 2508 return -ENODEV; 2509 2510 /* Avoid Bonding master dev with same MAC registering as VF */ 2511 if (netif_is_bond_master(event_ndev)) 2512 return -ENODEV; 2513 2514 return 0; 2515 } 2516 2517 static int netvsc_probe(struct hv_device *dev, 2518 const struct hv_vmbus_device_id *dev_id) 2519 { 2520 struct net_device *net = NULL, *vf_netdev; 2521 struct net_device_context *net_device_ctx; 2522 struct netvsc_device_info *device_info = NULL; 2523 struct netvsc_device *nvdev; 2524 int ret = -ENOMEM; 2525 2526 net = alloc_etherdev_mq(sizeof(struct net_device_context), 2527 VRSS_CHANNEL_MAX); 2528 if (!net) 2529 goto no_net; 2530 2531 netif_carrier_off(net); 2532 2533 netvsc_init_settings(net); 2534 2535 net_device_ctx = netdev_priv(net); 2536 net_device_ctx->device_ctx = dev; 2537 net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); 2538 if (netif_msg_probe(net_device_ctx)) 2539 netdev_dbg(net, "netvsc msg_enable: %d\n", 2540 net_device_ctx->msg_enable); 2541 2542 hv_set_drvdata(dev, net); 2543 2544 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 2545 2546 init_completion(&net_device_ctx->vf_add); 2547 spin_lock_init(&net_device_ctx->lock); 2548 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); 2549 INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); 2550 2551 net_device_ctx->vf_stats 2552 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); 2553 if (!net_device_ctx->vf_stats) 2554 goto no_stats; 2555 2556 net->netdev_ops = &device_ops; 2557 net->ethtool_ops = ðtool_ops; 2558 SET_NETDEV_DEV(net, &dev->device); 2559 dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1); 2560 2561 /* We always need headroom for rndis header */ 2562 net->needed_headroom = RNDIS_AND_PPI_SIZE; 2563 2564 /* Initialize the number of queues to be 1, we may change it if more 2565 * channels are offered later. 2566 */ 2567 netif_set_real_num_tx_queues(net, 1); 2568 netif_set_real_num_rx_queues(net, 1); 2569 2570 /* Notify the netvsc driver of the new device */ 2571 device_info = netvsc_devinfo_get(NULL); 2572 2573 if (!device_info) { 2574 ret = -ENOMEM; 2575 goto devinfo_failed; 2576 } 2577 2578 /* We must get rtnl lock before scheduling nvdev->subchan_work, 2579 * otherwise netvsc_subchan_work() can get rtnl lock first and wait 2580 * all subchannels to show up, but that may not happen because 2581 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() 2582 * -> ... -> device_add() -> ... -> __device_attach() can't get 2583 * the device lock, so all the subchannels can't be processed -- 2584 * finally netvsc_subchan_work() hangs forever. 2585 * 2586 * The rtnl lock also needs to be held before rndis_filter_device_add() 2587 * which advertises nvsp_2_vsc_capability / sriov bit, and triggers 2588 * VF NIC offering and registering. If VF NIC finished register_netdev() 2589 * earlier it may cause name based config failure. 2590 */ 2591 rtnl_lock(); 2592 2593 nvdev = rndis_filter_device_add(dev, device_info); 2594 if (IS_ERR(nvdev)) { 2595 ret = PTR_ERR(nvdev); 2596 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 2597 goto rndis_failed; 2598 } 2599 2600 eth_hw_addr_set(net, device_info->mac_adr); 2601 2602 if (nvdev->num_chn > 1) 2603 schedule_work(&nvdev->subchan_work); 2604 2605 /* hw_features computed in rndis_netdev_set_hwcaps() */ 2606 net->features = net->hw_features | 2607 NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | 2608 NETIF_F_HW_VLAN_CTAG_RX; 2609 net->vlan_features = net->features; 2610 2611 netdev_lockdep_set_classes(net); 2612 2613 net->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 2614 NETDEV_XDP_ACT_NDO_XMIT; 2615 2616 /* MTU range: 68 - 1500 or 65521 */ 2617 net->min_mtu = NETVSC_MTU_MIN; 2618 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) 2619 net->max_mtu = NETVSC_MTU - ETH_HLEN; 2620 else 2621 net->max_mtu = ETH_DATA_LEN; 2622 2623 nvdev->tx_disable = false; 2624 2625 ret = register_netdevice(net); 2626 if (ret != 0) { 2627 pr_err("Unable to register netdev.\n"); 2628 goto register_failed; 2629 } 2630 2631 list_add(&net_device_ctx->list, &netvsc_dev_list); 2632 2633 /* When the hv_netvsc driver is unloaded and reloaded, the 2634 * NET_DEVICE_REGISTER for the vf device is replayed before probe 2635 * is complete. This is because register_netdevice_notifier() gets 2636 * registered before vmbus_driver_register() so that callback func 2637 * is set before probe and we don't miss events like NETDEV_POST_INIT 2638 * So, in this section we try to register the matching vf device that 2639 * is present as a netdevice, knowing that its register call is not 2640 * processed in the netvsc_netdev_notifier(as probing is progress and 2641 * get_netvsc_byslot fails). 2642 */ 2643 for_each_netdev(dev_net(net), vf_netdev) { 2644 ret = check_dev_is_matching_vf(vf_netdev); 2645 if (ret != 0) 2646 continue; 2647 2648 if (net != get_netvsc_byslot(vf_netdev)) 2649 continue; 2650 2651 netvsc_prepare_bonding(vf_netdev); 2652 netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE); 2653 __netvsc_vf_setup(net, vf_netdev); 2654 break; 2655 } 2656 rtnl_unlock(); 2657 2658 netvsc_devinfo_put(device_info); 2659 return 0; 2660 2661 register_failed: 2662 rndis_filter_device_remove(dev, nvdev); 2663 rndis_failed: 2664 rtnl_unlock(); 2665 netvsc_devinfo_put(device_info); 2666 devinfo_failed: 2667 free_percpu(net_device_ctx->vf_stats); 2668 no_stats: 2669 hv_set_drvdata(dev, NULL); 2670 free_netdev(net); 2671 no_net: 2672 return ret; 2673 } 2674 2675 static void netvsc_remove(struct hv_device *dev) 2676 { 2677 struct net_device_context *ndev_ctx; 2678 struct net_device *vf_netdev, *net; 2679 struct netvsc_device *nvdev; 2680 2681 net = hv_get_drvdata(dev); 2682 if (net == NULL) { 2683 dev_err(&dev->device, "No net device to remove\n"); 2684 return; 2685 } 2686 2687 ndev_ctx = netdev_priv(net); 2688 2689 cancel_delayed_work_sync(&ndev_ctx->dwork); 2690 2691 rtnl_lock(); 2692 nvdev = rtnl_dereference(ndev_ctx->nvdev); 2693 if (nvdev) { 2694 cancel_work_sync(&nvdev->subchan_work); 2695 netvsc_xdp_set(net, NULL, NULL, nvdev); 2696 } 2697 2698 /* 2699 * Call to the vsc driver to let it know that the device is being 2700 * removed. Also blocks mtu and channel changes. 2701 */ 2702 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 2703 if (vf_netdev) 2704 netvsc_unregister_vf(vf_netdev); 2705 2706 if (nvdev) 2707 rndis_filter_device_remove(dev, nvdev); 2708 2709 unregister_netdevice(net); 2710 list_del(&ndev_ctx->list); 2711 2712 rtnl_unlock(); 2713 2714 hv_set_drvdata(dev, NULL); 2715 2716 free_percpu(ndev_ctx->vf_stats); 2717 free_netdev(net); 2718 } 2719 2720 static int netvsc_suspend(struct hv_device *dev) 2721 { 2722 struct net_device_context *ndev_ctx; 2723 struct netvsc_device *nvdev; 2724 struct net_device *net; 2725 int ret; 2726 2727 net = hv_get_drvdata(dev); 2728 2729 ndev_ctx = netdev_priv(net); 2730 cancel_delayed_work_sync(&ndev_ctx->dwork); 2731 2732 rtnl_lock(); 2733 2734 nvdev = rtnl_dereference(ndev_ctx->nvdev); 2735 if (nvdev == NULL) { 2736 ret = -ENODEV; 2737 goto out; 2738 } 2739 2740 /* Save the current config info */ 2741 ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev); 2742 if (!ndev_ctx->saved_netvsc_dev_info) { 2743 ret = -ENOMEM; 2744 goto out; 2745 } 2746 ret = netvsc_detach(net, nvdev); 2747 out: 2748 rtnl_unlock(); 2749 2750 return ret; 2751 } 2752 2753 static int netvsc_resume(struct hv_device *dev) 2754 { 2755 struct net_device *net = hv_get_drvdata(dev); 2756 struct net_device_context *net_device_ctx; 2757 struct netvsc_device_info *device_info; 2758 int ret; 2759 2760 rtnl_lock(); 2761 2762 net_device_ctx = netdev_priv(net); 2763 2764 /* Reset the data path to the netvsc NIC before re-opening the vmbus 2765 * channel. Later netvsc_netdev_event() will switch the data path to 2766 * the VF upon the UP or CHANGE event. 2767 */ 2768 net_device_ctx->data_path_is_vf = false; 2769 device_info = net_device_ctx->saved_netvsc_dev_info; 2770 2771 ret = netvsc_attach(net, device_info); 2772 2773 netvsc_devinfo_put(device_info); 2774 net_device_ctx->saved_netvsc_dev_info = NULL; 2775 2776 rtnl_unlock(); 2777 2778 return ret; 2779 } 2780 static const struct hv_vmbus_device_id id_table[] = { 2781 /* Network guid */ 2782 { HV_NIC_GUID, }, 2783 { }, 2784 }; 2785 2786 MODULE_DEVICE_TABLE(vmbus, id_table); 2787 2788 /* The one and only one */ 2789 static struct hv_driver netvsc_drv = { 2790 .name = KBUILD_MODNAME, 2791 .id_table = id_table, 2792 .probe = netvsc_probe, 2793 .remove = netvsc_remove, 2794 .suspend = netvsc_suspend, 2795 .resume = netvsc_resume, 2796 .driver = { 2797 .probe_type = PROBE_FORCE_SYNCHRONOUS, 2798 }, 2799 }; 2800 2801 /* 2802 * On Hyper-V, every VF interface is matched with a corresponding 2803 * synthetic interface. The synthetic interface is presented first 2804 * to the guest. When the corresponding VF instance is registered, 2805 * we will take care of switching the data path. 2806 */ 2807 static int netvsc_netdev_event(struct notifier_block *this, 2808 unsigned long event, void *ptr) 2809 { 2810 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 2811 int ret = 0; 2812 2813 ret = check_dev_is_matching_vf(event_dev); 2814 if (ret != 0) 2815 return NOTIFY_DONE; 2816 2817 switch (event) { 2818 case NETDEV_POST_INIT: 2819 return netvsc_prepare_bonding(event_dev); 2820 case NETDEV_REGISTER: 2821 return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER); 2822 case NETDEV_UNREGISTER: 2823 return netvsc_unregister_vf(event_dev); 2824 case NETDEV_UP: 2825 case NETDEV_DOWN: 2826 case NETDEV_CHANGE: 2827 case NETDEV_GOING_DOWN: 2828 return netvsc_vf_changed(event_dev, event); 2829 default: 2830 return NOTIFY_DONE; 2831 } 2832 } 2833 2834 static struct notifier_block netvsc_netdev_notifier = { 2835 .notifier_call = netvsc_netdev_event, 2836 }; 2837 2838 static void __exit netvsc_drv_exit(void) 2839 { 2840 unregister_netdevice_notifier(&netvsc_netdev_notifier); 2841 vmbus_driver_unregister(&netvsc_drv); 2842 } 2843 2844 static int __init netvsc_drv_init(void) 2845 { 2846 int ret; 2847 2848 if (ring_size < RING_SIZE_MIN) { 2849 ring_size = RING_SIZE_MIN; 2850 pr_info("Increased ring_size to %u (min allowed)\n", 2851 ring_size); 2852 } 2853 netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096); 2854 2855 register_netdevice_notifier(&netvsc_netdev_notifier); 2856 2857 ret = vmbus_driver_register(&netvsc_drv); 2858 if (ret) 2859 goto err_vmbus_reg; 2860 2861 return 0; 2862 2863 err_vmbus_reg: 2864 unregister_netdevice_notifier(&netvsc_netdev_notifier); 2865 return ret; 2866 } 2867 2868 MODULE_LICENSE("GPL"); 2869 MODULE_DESCRIPTION("Microsoft Hyper-V network driver"); 2870 2871 module_init(netvsc_drv_init); 2872 module_exit(netvsc_drv_exit); 2873