/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"


#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
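
/* Both parameters above are read-only at runtime (S_IRUGO) and are meant
 * to be set at module load time; as an illustrative invocation:
 *
 *	modprobe hv_netvsc ring_size=256 debug=16
 *
 * ring_size is clamped to RING_SIZE_MIN in netvsc_drv_init() below.
 */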

static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_start_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}

/* Append a per-packet info (PPI) element to the PPI area that follows the
 * RNDIS packet header, and return a pointer to it.
 */
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

union sub_key {
	u64 k;
	struct {
		u8 pad[3];
		u8 kb;
		u32 ka;
	};
};

/* Toeplitz hash function
 * data: network byte order
 * return: host byte order
 */
static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
{
	union sub_key subk;
	int k_next = 4;
	u8 dt;
	int i, j;
	u32 ret = 0;

	subk.k = 0;
	subk.ka = ntohl(*(u32 *)key);

	for (i = 0; i < dlen; i++) {
		subk.kb = key[k_next];
		k_next = (k_next + 1) % klen;
		dt = ((u8 *)data)[i];
		for (j = 0; j < 8; j++) {
			if (dt & 0x80)
				ret ^= subk.ka;
			dt <<= 1;
			subk.k <<= 1;
		}
	}

	return ret;
}
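
/* A sketch of how the sliding key window in comp_hash() above works:
 * subk.ka holds the current 32-bit window of the hash key and subk.kb
 * holds the next key byte.  For each input bit that is set, the window
 * is XORed into the result; shifting subk.k left by one then advances
 * the window by one bit, pulling the top bit of subk.kb into the bottom
 * of subk.ka.  E.g. for a one-byte input of 0x80, the result is simply
 * the first 32 bits of the key (in host order, after the ntohl above).
 */
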
static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct flow_keys flow;
	int data_len;

	if (!skb_flow_dissect(skb, &flow) ||
	    !(flow.n_proto == htons(ETH_P_IP) ||
	      flow.n_proto == htons(ETH_P_IPV6)))
		return false;

	if (flow.ip_proto == IPPROTO_TCP)
		data_len = 12;
	else
		data_len = 8;

	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);

	return true;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev = net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	if (netvsc_set_hash(&hash, skb)) {
		q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
			ndev->real_num_tx_queues;
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return q_idx;
}

void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->send_completion_tid;

	if (skb)
		dev_kfree_skb_any(skb);
}

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
			struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet)
{
	struct hv_page_buffer *pb = packet->page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag),
					  &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused bytes at the start of the page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}
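
/* Count the page-buffer slots needed to describe an skb: the linear area
 * plus all fragments, each rounded up to page boundaries.  As a worked
 * example (4096-byte pages): a 1500-byte linear area starting at offset
 * 4000 within its page spans DIV_ROUND_UP(4000 + 1500, 4096) = 2 slots.
 */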
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}
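
/* Transmit path: build the RNDIS packet header and any per-packet info
 * (hash, VLAN, checksum, LSO) in the skb headroom, describe the whole
 * message as a page-buffer array, and hand it to netvsc_send().
 */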
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	bool linear = false;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	u32 hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	u32 pkt_sz;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];


	/* We need at most two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered across
	 * more pages than that, we try linearizing it.
	 */

check_size:
	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
				      num_data_pgs, skb->len);
		ret = -EFAULT;
		goto drop;
	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (skb_linearize(skb)) {
			net_alert_ratelimited("failed to linearize skb\n");
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;
	}

	pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;

	ret = skb_cow_head(skb, pkt_sz);
	if (ret) {
		netdev_err(net, "unable to alloc hv_netvsc_packet\n");
		ret = -ENOMEM;
		goto drop;
	}
	/* Use the headroom for building up the packet */
	packet = (struct hv_netvsc_packet *)skb->head;

	packet->status = 0;
	packet->xmit_more = skb->xmit_more;

	packet->vlan_tci = skb->vlan_tci;
	packet->page_buf = page_buf;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->is_data_pkt = true;
	packet->total_data_buflen = skb->len;

	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
				sizeof(struct hv_netvsc_packet));

	memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Set the completion routine */
	packet->send_completion = netvsc_xmit_completion;
	packet->send_completion_ctx = packet;
	packet->send_completion_tid = (unsigned long)skb;

	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg = packet->rndis_msg;
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

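/* For GSO packets the host performs the TCP segmentation, so instead of
 * a checksum PPI we attach an LSO v2 PPI: the length fields are zeroed
 * and the TCP checksum is seeded with the pseudo-header sum, so that the
 * host can fill in the per-segment lengths and checksums.
 */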
do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
			ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet);

	ret = netvsc_send(net_device_ctx->device_ctx, packet);

drop:
	if (ret == 0) {
		net->stats.tx_bytes += skb_length;
		net->stats.tx_packets++;
	} else {
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;

	net_device = hv_get_drvdata(device_obj);
	rdev = net_device->extension;

	switch (indicate->status) {
	case RNDIS_STATUS_MEDIA_CONNECT:
		rdev->link_state = false;
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		rdev->link_state = true;
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		rdev->link_change = true;
		break;
	default:
		return;
	}

	net = net_device->ndev;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);
	if (!rdev->link_state) {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
	} else {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
	}
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 struct ndis_tcp_ip_checksum_info *csum_info)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/* Allocate an skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed to
	 * by hv_netvsc_packet cannot be deallocated.
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	skb_record_rx_queue(skb, packet->channel->
			    offermsg.offer.sub_channel_index);

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}
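
/* Changing the MTU requires tearing down the RNDIS device and adding it
 * back with the new size: pending work is cancelled and the TX queues
 * are stopped across the rndis_filter_device_remove()/_add() cycle.
 */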
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	/* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
	if (mtu < ETH_DATA_LEN || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_tx_wake_all_queues(ndev);

	return 0;
}


static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously, we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels	= netvsc_get_channels,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Send a GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving the RNDIS_STATUS_MEDIA_CONNECT event. So,
 * run netdev_notify_peers() from a delayed work; otherwise the GARP packet
 * will not be sent after quick migration, which causes a network
 * disconnection. We also update the carrier status here.
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool notify, refresh = false;
	char *argv[] = { "/etc/init.d/network", "restart", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	rtnl_lock();

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	if (rdev->link_state) {
		netif_carrier_off(net);
		notify = false;
	} else {
		netif_carrier_on(net);
		notify = true;
		if (rdev->link_change) {
			rdev->link_change = false;
			refresh = true;
		}
	}

	rtnl_unlock();

	if (refresh)
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	if (notify)
		netdev_notify_peers(net);
}

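
/* Probe a new VMBus network device: allocate the net_device, set up the
 * per-device context and offload features, add the RNDIS device, and
 * register with the network stack.
 */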
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;
	u32 max_needed_headroom;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	max_needed_headroom = sizeof(struct hv_netvsc_packet) +
			      RNDIS_AND_PPI_SIZE;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/*
	 * Request additional headroom in the skb.
	 * We will use this space to build the rndis
	 * header and other state we need to maintain.
	 */
	net->needed_headroom = max_needed_headroom;

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}
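
/* Undo netvsc_probe(): stop outbound traffic, unregister the net_device,
 * and remove the underlying RNDIS device.
 */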
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);