// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/bpf.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}
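
/* Sync the uc/mc filter lists to the VF (when one is bonded in) and push
 * the updated RNDIS packet filter to the host, so both data paths see the
 * same rx-mode configuration.
 */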
static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting synthetic device up transparently sets
		 * slave as up. If open fails, then slave will
		 * still be offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}
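
/* Append a per-packet info (PPI) record to the RNDIS packet header and
 * return a pointer to its payload area. Callers fill in the payload
 * (hash value, VLAN tag, checksum or LSO parameters) directly.
 */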
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(struct sk_buff *skb,
				  const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			return 0;

		__skb_set_sw_hash(skb, hash, false);
	}

	return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}
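
/* ndo_select_queue: when a VF is bonded in, let the VF driver pick its own
 * queue and remember that choice so netvsc_vf_xmit() can replay it; the
 * result is then folded into the synthetic device's queue range, since the
 * VF may expose more queues than the synthetic device.
 */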
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
		else
			txq = netdev_pick_tx(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (txq >= ndev->real_num_tx_queues)
		txq -= ndev->real_num_tx_queues;

	return txq;
}

static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
	offset = offset & ~HV_HYP_PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = HV_HYP_PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = hvpfn;
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == HV_HYP_PAGE_SIZE && len) {
			hvpfn++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}
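
/* Illustrative example (4 KiB Hyper-V pages): a buffer at page offset 4000
 * with len 1000 spans two page-buffer entries,
 *	pb[0] = { pfn,     offset 4000, len  96 }
 *	pb[1] = { pfn + 1, offset    0, len 904 }
 * and fill_pg_buf() returns 2.
 */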

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
				  offset_in_hvpage(hdr),
				  len,
				  &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_hvpfn(data),
				  offset_in_hvpage(data),
				  skb_headlen(skb),
				  &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
					  skb_frag_off(frag),
					  skb_frag_size(frag),
					  &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip unused frames from start of page */
		offset &= ~HV_HYP_PAGE_MASK;
		pages += HVPFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_hvpage(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}
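
/* Main transmit path for the synthetic device. In outline:
 *   1. redirect to the VF when it owns the data path;
 *   2. count the page-buffer slots the skb needs, linearizing if it
 *      exceeds MAX_PAGE_BUFFER_COUNT;
 *   3. build the RNDIS header (plus hash/VLAN/LSO/checksum PPIs) in the
 *      skb headroom;
 *   4. hand the descriptor list to netvsc_send() for the VMBus channel.
 */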
static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If VF is present and up then redirect packets to it.
	 * Skip the VF if it is marked down or has no carrier.
	 * If netpoll is in use, then the VF cannot be used either.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) &&
	    net_device_ctx->data_path_is_vf)
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
		     sizeof_field(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	/* When using AF_PACKET we need to drop VLAN header from
	 * the frame and update the SKB to allow the HOST OS
	 * to transmit the 802.1Q packet
	 */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		u16 vlan_tci;

		skb_reset_mac_header(skb);
		if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
			if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
				++net_device_ctx->eth_stats.vlan_error;
				goto drop;
			}

			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
			/* Update the NDIS header pkt lengths */
			packet->total_data_buflen -= VLAN_HLEN;
			packet->total_bytes -= VLAN_HLEN;
			rndis_msg->msg_len = packet->total_data_buflen;
			rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}
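
	/* For GSO skbs the host does the TCP segmentation (NDIS LSO v2).
	 * NDIS expects the IP total length / checksum fields zeroed and the
	 * TCP checksum seeded with the pseudo-header sum, which is what the
	 * block below sets up.
	 */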
	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			tcp_v6_gso_csum_prep(skb);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	return netvsc_xmit(skb, ndev, false);
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp,
				void *data, u32 data_buflen)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Ensure the packet is big enough to access its fields */
	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
		netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
			   resp->msg_len);
		return;
	}

	/* Copy the RNDIS indicate status into nvchan->recv_buf */
	memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate));

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		/* Validate status_buf_offset and status_buflen.
		 *
		 * Certain (pre-Fe) implementations of Hyper-V's vSwitch didn't account
		 * for the status buffer field in resp->msg_len; perform the validation
		 * using data_buflen (>= resp->msg_len).
		 */
		if (indicate->status_buflen < sizeof(speed) ||
		    indicate->status_buf_offset < sizeof(*indicate) ||
		    data_buflen - RNDIS_HEADER_SIZE < indicate->status_buf_offset ||
		    data_buflen - RNDIS_HEADER_SIZE - indicate->status_buf_offset
		       < indicate->status_buflen) {
			netdev_err(net, "invalid rndis_indicate_status packet\n");
			return;
		}

		speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}
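
/* XDP_TX completion: eth_type_trans() already pulled the Ethernet header,
 * so push it back and transmit on the queue the packet was received on.
 */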
static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int rc;

	skb->queue_mapping = skb_get_rx_queue(skb);
	__skb_push(skb, ETH_HLEN);

	rc = netvsc_xmit(skb, ndev, true);

	if (dev_xmit_complete(rc))
		return;

	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
}

static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}
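
/* Build the receive skb: either wrap the XDP buffer with build_skb() (zero
 * copy) or, when no XDP buffer exists, allocate and copy the RSC fragments,
 * since the host-owned receive buffer cannot be handed to the stack.
 * Checksum, hash and VLAN PPI data from the host are then applied.
 */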
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan,
					     struct xdp_buff *xdp)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
		&nvchan->rsc.csum_info;
	const u32 *hash_info = &nvchan->rsc.hash_info;
	u8 ppi_flags = nvchan->rsc.ppi_flags;
	struct sk_buff *skb;
	void *xbuf = xdp->data_hard_start;
	int i;

	if (xbuf) {
		unsigned int hdroom = xdp->data - xdp->data_hard_start;
		unsigned int xlen = xdp->data_end - xdp->data;
		unsigned int frag_size = xdp->frame_sz;

		skb = build_skb(xbuf, frag_size);

		if (!skb) {
			__free_page(virt_to_page(xbuf));
			return NULL;
		}

		skb_reserve(skb, hdroom);
		skb_put(skb, xlen);
		skb->dev = napi->dev;
	} else {
		skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);

		if (!skb)
			return NULL;

		/* Copy to skb. This copy is needed here since the memory
		 * pointed by hv_netvsc_packet cannot be deallocated.
		 */
		for (i = 0; i < nvchan->rsc.cnt; i++)
			skb_put_data(skb, nvchan->rsc.data[i],
				     nvchan->rsc.len[i]);
	}

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/* Incoming packets may have IP header checksum verified by the host.
	 * They may not have IP header checksum computed after coalescing.
	 * We compute it here if the flags are set, because on Linux, the IP
	 * checksum is always checked.
	 */
	if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid &&
	    csum_info->receive.ip_checksum_succeeded &&
	    skb->protocol == htons(ETH_P_IP)) {
		/* Check that there is enough space to hold the IP header. */
		if (skb_headlen(skb) < sizeof(struct iphdr)) {
			kfree_skb(skb);
			return NULL;
		}
		netvsc_comp_ipcsum(skb);
	}

	/* Do L4 checksum offload if enabled and present. */
	if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH))
		skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);

	if (ppi_flags & NVSC_RSC_VLAN) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
			(vlan->cfi ? VLAN_CFI_MASK : 0);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct netvsc_channel *nvchan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct vmbus_channel *channel = nvchan->channel;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats = &nvchan->rx_stats;
	struct xdp_buff xdp;
	u32 act;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;
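
	/* Run the attached XDP program (if any) first; any verdict other
	 * than PASS or TX is accounted as an XDP drop and the packet is
	 * consumed without entering the stack.
	 */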
	act = netvsc_run_xdp(net, nvchan, &xdp);

	if (act != XDP_PASS && act != XDP_TX) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->xdp_drop++;
		u64_stats_update_end(&rx_stats->syncp);

		return NVSP_STAT_SUCCESS; /* consumed by XDP */
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);

	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += nvchan->rsc.pktlen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		netvsc_xdp_xmit(skb, net);
		return NVSP_STAT_SUCCESS;
	}

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

/* Alloc struct netvsc_device_info, and initialize it from either existing
 * struct netvsc_device, or from default values.
 */
static
struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;
	struct bpf_prog *prog;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);

	if (!dev_info)
		return NULL;

	if (nvdev) {
		ASSERT_RTNL();

		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);

		prog = netvsc_xdp_get(nvdev);
		if (prog) {
			bpf_prog_inc(prog);
			dev_info->bprog = prog;
		}
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}

/* Free struct netvsc_device_info */
static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
{
	if (dev_info->bprog) {
		ASSERT_RTNL();
		bpf_prog_put(dev_info->bprog);
	}

	kfree(dev_info);
}

static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't keep trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	netvsc_xdp_set(ndev, NULL, NULL, nvdev);

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netvsc_tx_disable(nvdev, ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}
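
/* netvsc_detach()/netvsc_attach() bracket any reconfiguration that needs
 * the RNDIS device re-created (channel count, MTU, ring sizes). Callers
 * re-attach with the original netvsc_device_info to roll back on failure.
 */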
static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	struct bpf_prog *prog;
	int ret = 0;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	prog = dev_info->bprog;
	if (prog) {
		bpf_prog_inc(prog);
		ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
		if (ret) {
			bpf_prog_put(prog);
			goto err1;
		}
	}

	/* In any case device is now ready */
	nvdev->tx_disable = false;
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels setup */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			goto err2;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;

err2:
	netif_device_detach(ndev);

err1:
	rndis_filter_device_remove(hdev, nvdev);

	return ret;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;

	dev->features = NETIF_F_LRO;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct net_device *vf_netdev;

	vf_netdev = rtnl_dereference(ndc->vf_netdev);

	if (vf_netdev)
		return __ethtool_get_link_ksettings(vf_netdev, cmd);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);

	if (vf_netdev) {
		if (!vf_netdev->ethtool_ops->set_link_ksettings)
			return -EOPNOTSUPP;

		return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
								  cmd);
	}

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &ndc->speed, &ndc->duplex);
}
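
/* MTU changes require the RNDIS device to be torn down and re-added, so
 * the VF MTU is changed first and rolled back if the synthetic path fails.
 */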
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info *device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			goto out;
	}

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, device_info);
	if (!ret)
		goto out;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}
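
/* Per-CPU totals: VF stats are per-CPU already; netvsc per-channel stats
 * are attributed to each channel's target CPU. The u64_stats begin/retry
 * loops guarantee a consistent 64-bit snapshot on 32-bit hosts.
 */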
static void netvsc_get_pcpu_stats(struct net_device *net,
				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	/* fetch percpu stats of vf */
	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats =
			per_cpu_ptr(ndev_ctx->vf_stats, i);
		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			this_tot->vf_rx_packets = stats->rx_packets;
			this_tot->vf_tx_packets = stats->tx_packets;
			this_tot->vf_rx_bytes = stats->rx_bytes;
			this_tot->vf_tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		this_tot->rx_packets = this_tot->vf_rx_packets;
		this_tot->tx_packets = this_tot->vf_tx_packets;
		this_tot->rx_bytes = this_tot->vf_rx_bytes;
		this_tot->tx_bytes = this_tot->vf_tx_bytes;
	}

	/* fetch percpu stats of netvsc */
	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		struct netvsc_ethtool_pcpu_stats *this_tot =
			&pcpu_tot[nvchan->channel->target_cpu];
		u64 packets, bytes;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->tx_bytes += bytes;
		this_tot->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->rx_bytes += bytes;
		this_tot->rx_packets += packets;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	rcu_read_lock();

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (!nvdev)
		goto out;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes += vf_tot.rx_bytes;
	t->tx_bytes += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes += bytes;
		t->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes += bytes;
		t->rx_packets += packets;
		t->multicast += multicast;
	}
out:
	rcu_read_unlock();
}
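
/* MAC address changes are applied to the VF first, then to the synthetic
 * device via RNDIS; if the RNDIS set fails, the VF change is rolled back
 * so the two devices never disagree on the address.
 */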
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr, NULL);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr, NULL);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
	{ "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
}, pcpu_stats[] = {
	{ "cpu%u_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
	{ "cpu%u_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
	{ "cpu%u_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
	{ "cpu%u_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
	{ "cpu%u_vf_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
	{ "cpu%u_vf_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
	{ "cpu%u_vf_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
	{ "cpu%u_vf_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev)
			+ NETVSC_PCPU_STATS_LEN;
	default:
		return -EINVAL;
	}
}
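
/* Fill the ethtool stats buffer. The ordering here must match the string
 * layout emitted by netvsc_get_strings(): global stats, VF totals, then
 * per-queue and per-CPU counters.
 */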
static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_drop;
	int i, j, cpu;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
			xdp_drop = qstats->xdp_drop;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
	}

	pcpu_sum = kvmalloc_array(num_possible_cpus(),
				  sizeof(struct netvsc_ethtool_pcpu_stats),
				  GFP_KERNEL);
	if (!pcpu_sum)
		return;

	netvsc_get_pcpu_stats(dev, pcpu_sum);
	for_each_present_cpu(cpu) {
		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];

		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
			data[i++] = *(u64 *)((void *)this_sum
					     + pcpu_stats[j].offset);
	}
	kvfree(pcpu_sum);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i, cpu;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
			ethtool_sprintf(&p, netvsc_stats[i].name);

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++)
			ethtool_sprintf(&p, vf_stats[i].name);

		for (i = 0; i < nvdev->num_chn; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
			ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
		}

		for_each_present_cpu(cpu) {
			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++)
				ethtool_sprintf(&p, pcpu_stats[i].name, cpu);
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;

		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}
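
/* ethtool -n/-N support: only ring count queries (GRXRINGS) and RSS hash
 * options (GRXFH/SRXFH) are implemented; flow steering rules are not.
 */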
static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}
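
/* RSS state is split: the indirection table lives in the net_device
 * context (rx_table), while the Toeplitz key is owned by the RNDIS
 * device and pushed to the host on change.
 */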
static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = ndc->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			ndc->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* Hyper-V RNDIS protocol does not have a ring in the HW sense.
 * It does have a pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info *device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->send_sections = new_tx;
	device_info->recv_sections = new_rx;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(ndev, device_info);
	if (ret) {
		device_info->send_sections = orig.tx_pending;
		device_info->recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, device_info))
			netdev_err(ndev, "restoring ringparam failed");
	}

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

static netdev_features_t netvsc_fix_features(struct net_device *ndev,
					     netdev_features_t features)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev || nvdev->destroy)
		return features;

	if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
		features ^= NETIF_F_LRO;
		netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
	}

	return features;
}
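
/* Of the toggled features only NETIF_F_LRO maps to a host offload (RSC);
 * everything else is simply mirrored onto the VF's wanted_features so the
 * slave follows the synthetic device.
 */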
static int netvsc_set_features(struct net_device *ndev,
			       netdev_features_t features)
{
	netdev_features_t change = features ^ ndev->features;
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct ndis_offload_params offloads;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (!(change & NETIF_F_LRO))
		goto syncvf;

	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	if (features & NETIF_F_LRO) {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
	} else {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
	}

	ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);

	if (ret) {
		features ^= NETIF_F_LRO;
		ndev->features = features;
	}

syncvf:
	if (!vf_netdev)
		return ret;

	vf_netdev->wanted_features = features;
	netdev_update_features(vf_netdev);

	return ret;
}

static int netvsc_get_regs_len(struct net_device *netdev)
{
	return VRSS_SEND_TAB_SIZE * sizeof(u32);
}

static void netvsc_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *p)
{
	struct net_device_context *ndc = netdev_priv(netdev);
	u32 *regs_buff = p;

	/* increase the version, if buffer format is changed. */
	regs->version = 1;

	memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32));
}

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = netvsc_get_drvinfo,
	.get_regs_len = netvsc_get_regs_len,
	.get_regs = netvsc_get_regs,
	.get_msglevel = netvsc_get_msglevel,
	.set_msglevel = netvsc_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings = netvsc_get_strings,
	.get_channels = netvsc_get_channels,
	.set_channels = netvsc_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_rxnfc = netvsc_get_rxnfc,
	.set_rxnfc = netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh = netvsc_get_rxfh,
	.set_rxfh = netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam = netvsc_get_ringparam,
	.set_ringparam = netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open = netvsc_open,
	.ndo_stop = netvsc_close,
	.ndo_start_xmit = netvsc_start_xmit,
	.ndo_change_rx_flags = netvsc_change_rx_flags,
	.ndo_set_rx_mode = netvsc_set_rx_mode,
	.ndo_fix_features = netvsc_fix_features,
	.ndo_set_features = netvsc_set_features,
	.ndo_change_mtu = netvsc_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = netvsc_set_mac_addr,
	.ndo_select_queue = netvsc_select_queue,
	.ndo_get_stats64 = netvsc_get_stats64,
	.ndo_bpf = netvsc_bpf,
};
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	unsigned long flags, next_reconfig, delay;
	struct netvsc_reconfig *event = NULL;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool reschedule = false;

	/* If changes are happening, come back later. */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netvsc_tx_enable(net_device, net);
		} else {
			__netdev_notify_peers(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct net_device *dev;

	dev = netdev_master_upper_dev_get(vf_netdev);
	if (!dev || dev->netdev_ops != &device_ops)
		return NULL;	/* not a netvsc device */

	net_device_ctx = netdev_priv(dev);
	if (!rtnl_dereference(net_device_ctx->nvdev))
		return NULL;	/* device is removed */

	return dev;
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		= this_cpu_ptr(ndev_ctx->vf_stats);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}

static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	/* set multicast etc flags on VF */
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);

	/* sync address list from ndev to VF */
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
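/* A note on the rtnl_trylock() + reschedule pattern below: this work is
 * cancelled with cancel_delayed_work_sync() from contexts that already
 * hold the RTNL (e.g. netvsc_unregister_vf()). If the work blocked in
 * rtnl_lock(), the cancel would wait on the work while the work waited
 * on the lock. Trying the lock and re-queueing on contention avoids
 * that deadlock.
 */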
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

/* Find netvsc by VF serial number.
 * The PCI hyperv controller records the serial number as the slot kobj name.
 */
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
	struct device *parent = vf_netdev->dev.parent;
	struct net_device_context *ndev_ctx;
	struct pci_dev *pdev;
	u32 serial;

	if (!parent || !dev_is_pci(parent))
		return NULL; /* not a PCI device */

	pdev = to_pci_dev(parent);
	if (!pdev->slot) {
		netdev_notice(vf_netdev, "no PCI slot information\n");
		return NULL;
	}

	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
			      pci_slot_name(pdev->slot));
		return NULL;
	}

	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		if (!ndev_ctx->vf_alloc)
			continue;

		if (ndev_ctx->vf_serial == serial)
			return hv_get_drvdata(ndev_ctx->device_ctx);
	}

	netdev_notice(vf_netdev,
		      "no netdev found for vf serial:%u\n", serial);
	return NULL;
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct bpf_prog *prog;
	struct net_device *ndev;
	int ret;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	ndev = get_netvsc_byslot(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	/* If the synthetic interface is in a different namespace,
	 * then move the VF to that namespace; the join will be
	 * done again in that context.
	 */
	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
		ret = dev_change_net_namespace(vf_netdev,
					       dev_net(ndev), "eth%d");
		if (ret)
			netdev_err(vf_netdev,
				   "could not move to same namespace as %s: %d\n",
				   ndev->name, ret);
		else
			netdev_info(vf_netdev,
				    "VF moved to namespace with: %s\n",
				    ndev->name);
		return NOTIFY_DONE;
	}

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);

	vf_netdev->wanted_features = ndev->features;
	netdev_update_features(vf_netdev);

	prog = netvsc_xdp_get(netvsc_dev);
	netvsc_vf_setxdp(vf_netdev, prog);

	return NOTIFY_OK;
}
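
/* For illustration, a typical notifier sequence when an SR-IOV VF is
 * hot-added to the guest looks roughly like:
 *
 *	NETDEV_REGISTER -> netvsc_register_vf()  (pair VF with netvsc dev)
 *	NETDEV_UP       -> netvsc_vf_changed()   (switch data path to VF)
 *	NETDEV_CHANGE   -> netvsc_vf_changed()   (no-op, data path cached)
 *
 * The pairing key is the VF's PCI slot serial number; see
 * get_netvsc_byslot() above.
 */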
/* Change the data path when VF UP/DOWN/CHANGE are detected.
 *
 * Typically a UP or DOWN event is followed by a CHANGE event, so
 * net_device_ctx->data_path_is_vf is used to cache the current data path
 * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate
 * message.
 *
 * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
 * interface, there is only the CHANGE event and no UP or DOWN event.
 */
static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = false;

	if (event != NETDEV_GOING_DOWN)
		vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	if (net_device_ctx->data_path_is_vf == vf_is_up)
		return NOTIFY_OK;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netvsc_vf_setxdp(vf_netdev, NULL);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info *device_info = NULL;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;
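	/* As the macro name suggests, RNDIS_AND_PPI_SIZE accounts for the
	 * RNDIS message header plus the per-packet info (PPI) appended to
	 * it, so the transmit path can normally build them in the skb
	 * headroom instead of reallocating.
	 */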

	/* Initialize the number of queues to be 1; we may change it if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	device_info = netvsc_devinfo_get(NULL);

	if (!device_info) {
		ret = -ENOMEM;
		goto devinfo_failed;
	}

	nvdev = rndis_filter_device_add(dev, device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);

	/* We must get the rtnl lock before scheduling nvdev->subchan_work,
	 * otherwise netvsc_subchan_work() can get the rtnl lock first and
	 * wait for all subchannels to show up, but that may not happen
	 * because netvsc_probe() can't get the rtnl lock and as a result
	 * vmbus_onoffer() -> ... -> device_add() -> ... ->
	 * __device_attach() can't get the device lock, so all the
	 * subchannels can't be processed -- finally netvsc_subchan_work()
	 * hangs forever.
	 */
	rtnl_lock();

	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);

	/* hw_features computed in rndis_netdev_set_hwcaps() */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	nvdev->tx_disable = false;

	ret = register_netdevice(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	list_add(&net_device_ctx->list, &netvsc_dev_list);
	rtnl_unlock();

	netvsc_devinfo_put(device_info);
	return 0;

register_failed:
	rtnl_unlock();
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	netvsc_devinfo_put(device_info);
devinfo_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev, *net;
	struct netvsc_device *nvdev;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev) {
		cancel_work_sync(&nvdev->subchan_work);
		netvsc_xdp_set(net, NULL, NULL, nvdev);
	}
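
	/* The teardown below is ordered: unlink any VF first, then remove
	 * the RNDIS device, then unregister the netdev, all inside one
	 * RTNL section so that a data path switch cannot race with the
	 * removal.
	 */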
	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);

	unregister_netdevice(net);
	list_del(&ndev_ctx->list);

	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static int netvsc_suspend(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct netvsc_device *nvdev;
	struct net_device *net;
	int ret;

	net = hv_get_drvdata(dev);

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();

	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev == NULL) {
		ret = -ENODEV;
		goto out;
	}

	/* Save the current config info */
	ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);

	ret = netvsc_detach(net, nvdev);
out:
	rtnl_unlock();

	return ret;
}

static int netvsc_resume(struct hv_device *dev)
{
	struct net_device *net = hv_get_drvdata(dev);
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info *device_info;
	int ret;

	rtnl_lock();

	net_device_ctx = netdev_priv(net);

	/* Reset the data path to the netvsc NIC before re-opening the vmbus
	 * channel. Later netvsc_netdev_event() will switch the data path to
	 * the VF upon the UP or CHANGE event.
	 */
	net_device_ctx->data_path_is_vf = false;
	device_info = net_device_ctx->saved_netvsc_dev_info;

	ret = netvsc_attach(net, device_info);

	netvsc_devinfo_put(device_info);
	net_device_ctx->saved_netvsc_dev_info = NULL;

	rtnl_unlock();

	return ret;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
	.suspend = netvsc_suspend,
	.resume = netvsc_resume,
	.driver = {
		.probe_type = PROBE_FORCE_SYNCHRONOUS,
	},
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
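/* The notifier sees events for every netdev in the system, so the checks
 * at the top of netvsc_netdev_event() filter out anything that cannot be
 * a companion VF: our own netvsc devices, non-Ethernet devices, VLAN
 * devices, and bonding masters that share the VF's MAC address.
 */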
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid a VLAN dev with the same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid a bonding master dev with the same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
	case NETDEV_GOING_DOWN:
		return netvsc_vf_changed(event_dev, event);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);
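
/* Usage note (illustrative): the ring buffer size is a load-time module
 * parameter given in pages, e.g.
 *
 *	modprobe hv_netvsc ring_size=256
 *
 * Values below RING_SIZE_MIN are raised to the minimum (with an info
 * message) by netvsc_drv_init() above.
 */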