/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"


static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->start_remove = false;
	net_device->destroy = false;
	net_device->dev = device;
	net_device->ndev = ndev;

	hv_set_drvdata(device, net_device);
	return net_device;
}

static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);
	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);

	if (!net_device)
		goto get_in_err;

	if (net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0)
		net_device = NULL;

get_in_err:
	return net_device;
}

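/*
 * Tear down the receive and send buffers shared with the host.  The
 * ordering below matters: first revoke each buffer via an NVSP message
 * (so the host stops using it), then tear down the GPADL that maps the
 * guest pages to the host, and only then free the guest memory.
 * Freeing the pages before the revoke/teardown completes could let the
 * host write into memory the guest has already reused.
 */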
static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = net_device->ndev;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
			revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * SendSendBufferComplete msg (ie sent
	 * NvspMessage1TypeSendSendBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id = 0;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return ret;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);

	return ret;
}

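/*
 * Allocate the receive and send buffers and hand them to the host.
 * For each buffer the sequence is: allocate guest memory, create a
 * GPADL (guest physical address descriptor list) for it over the
 * channel, send the GPADL handle to the NetVSP in an NVSP message,
 * and block until the host acknowledges with a completion packet.
 */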
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	int t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	net_device->recv_buf = vzalloc(net_device->recv_buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */

	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

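	/*
	 * The send buffer setup below mirrors the receive buffer path:
	 * allocate, establish a GPADL, notify the host, and wait for the
	 * completion.  The difference is that send sections are carved up
	 * and allocated by the guest (via the send_section_map bitmap set
	 * up at the end of this function), whereas receive sections are
	 * described by the host in its response.
	 */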
	/* Now setup the send buffer.
	 */
	net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = 0;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size.
	 */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	dev_info(&device->device, "Send section size: %d, Section count:%d\n",
		 net_device->send_section_size, net_device->send_section_cnt);

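	/*
	 * Size the bitmap that tracks which send sections are in use: one
	 * bit per section, rounded up to whole unsigned longs.  As an
	 * illustration only (the actual section size comes from the host's
	 * response above): a 1 MB send buffer with 2048-byte sections gives
	 * 512 sections, i.e. 8 map words on a 64-bit kernel.
	 */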
	/* Setup state for managing the send buffer.
	 */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map =
		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(net_device);

exit:
	return ret;
}


/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	int ret, t;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);

	if (t == 0)
		return -ETIMEDOUT;

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 only: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

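/*
 * Connect to the NetVSP on the host.  Version negotiation walks the
 * supported version list from newest to oldest; each probe offers a
 * single version (min == max) and the first one the host accepts wins.
 */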
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	struct net_device *ndev;
	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i, num_ver = ARRAY_SIZE(ver_list); /* number of NVSP versions */

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = num_ver - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_buf(net_device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	unsigned long flags;

	net_device = hv_get_drvdata(device);

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	if (net_device->sub_cb_buf)
		vfree(net_device->sub_cb_buf);

	kfree(net_device);
	return 0;
}

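/*
 * Transmit flow control thresholds, as a percentage of the ring buffer
 * space available for writing: netvsc_send() stops a transmit queue
 * when available space drops below RING_AVAIL_PERCENT_LOWATER, and the
 * send-completion path wakes it once space rises back above
 * RING_AVAIL_PERCENT_HIWATER (or the queue has drained).
 */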
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev;
	u32 send_index;

	ndev = net_device->ndev;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		if (nvsc_packet) {
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = nvsc_packet->channel;
			nvsc_packet->send_completion(nvsc_packet->
						     send_completion_ctx);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends = atomic_dec_return(&net_device->
						queue_sends[q_idx]);

		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
	} else {
		netdev_err(ndev, "Unknown send completion packet type "
			   "%d received!\n", nvsp_packet->hdr.msg_type);
	}

}

static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i;
	int prev_val;

	for (i = 0; i < max_words; i++) {
		if (!~(map_addr[i]))	/* every section in this word in use */
			continue;
		index = ffz(map_addr[i]);	/* first zero (free) bit */
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val)	/* lost the race for this bit */
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	}
	return ret_val;
}

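/*
 * Copy a packet's page fragments into a pre-registered send buffer
 * section so the host can pick the data up without any per-packet
 * GPADL work.  netvsc_send() uses this path only for packets smaller
 * than one send section; larger packets fall back to
 * vmbus_sendpacket_pagebuffer().
 */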
u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
			    unsigned int section_index,
			    struct hv_netvsc_packet *packet)
{
	char *start = net_device->send_buf;
	char *dest = (start + (section_index * net_device->send_section_size));
	int i;
	u32 msg_size = 0;

	for (i = 0; i < packet->page_buf_cnt; i++) {
		char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
		u32 offset = packet->page_buf[i].offset;
		u32 len = packet->page_buf[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}
	return msg_size;
}

int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct nvsp_message sendMessage;
	struct net_device *ndev;
	struct vmbus_channel *out_channel = NULL;
	u64 req_id;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	u32 msg_size = 0;
	struct sk_buff *skb;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	/* Attempt to send via sendbuf */
	if (packet->total_data_buflen < net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			msg_size = netvsc_copy_to_send_buf(net_device,
							   section_index,
							   packet);
			skb = (struct sk_buff *)
			      (unsigned long)packet->send_completion_tid;
			if (skb)
				dev_kfree_skb_any(skb);
			packet->page_buf_cnt = 0;
		}
	}
	packet->send_buf_index = section_index;

	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		section_index;
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;

	if (packet->send_completion)
		req_id = (ulong)packet;
	else
		req_id = 0;

	out_channel = net_device->chn_table[packet->q_idx];
	if (out_channel == NULL)
		out_channel = device->channel;
	packet->channel = out_channel;

	if (packet->page_buf_cnt) {
		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  packet->page_buf,
						  packet->page_buf_cnt,
						  &sendMessage,
						  sizeof(struct nvsp_message),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel, &sendMessage,
				       sizeof(struct nvsp_message),
				       req_id,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[packet->q_idx]);

		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
		    RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(
					    ndev, packet->q_idx));

			if (atomic_read(&net_device->
					queue_sends[packet->q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, packet->q_idx));
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(netdev_get_tx_queue(
				    ndev, packet->q_idx));
		if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, packet->q_idx));
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

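/*
 * Acknowledge a host-to-guest transfer-page packet with a VM_PKT_COMP
 * packet.  The host keeps the receive buffer region that backed the
 * packet reserved until this completion arrives, so on a full ring the
 * retry loop below backs off briefly a few times rather than silently
 * dropping the acknowledgement.
 */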
static void netvsc_send_recv_completion(struct hv_device *device,
					struct vmbus_channel *channel,
					struct netvsc_device *net_device,
					u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;
	struct net_device *ndev;

	ndev = net_device->ndev;

	recvcompMessage.hdr.msg_type =
		NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
		/* no-op */
	} else if (ret == -EAGAIN) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		netdev_err(ndev, "unable to send receive completion pkt"
			   " (tid %llx)...retrying %d\n", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			netdev_err(ndev, "unable to send receive "
				   "completion pkt (tid %llx)...give up retrying\n",
				   transaction_id);
		}
	} else {
		netdev_err(ndev, "unable to send receive "
			   "completion pkt - %llx\n", transaction_id);
	}
}

static void netvsc_receive(struct netvsc_device *net_device,
			   struct vmbus_channel *channel,
			   struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	struct net_device *ndev;

	ndev = net_device->ndev;

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		netdev_err(ndev, "Unknown packet type received - %d\n",
			   packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		netdev_err(ndev, "Unknown nvsp packet type received-"
			   " %d\n", nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		netdev_err(ndev, "Invalid xfer page set id - "
			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		return;
	}

	count = vmxferpage_packet->range_cnt;
	netvsc_packet->device = device;
	netvsc_packet->channel = channel;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		/* Initialize the netvsc packet */
		netvsc_packet->status = NVSP_STAT_SUCCESS;
		netvsc_packet->data = (void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset);
		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		rndis_filter_receive(device, netvsc_packet);

		if (netvsc_packet->status != NVSP_STAT_SUCCESS)
			status = NVSP_STAT_FAIL;
	}

	netvsc_send_recv_completion(device, channel, net_device,
				    vmxferpage_packet->d.trans_id, status);
}

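/*
 * Cache the send indirection table the host pushes down via an
 * NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE message.  The transmit path
 * uses this table when mapping a packet's hash to one of the VRSS
 * subchannels.
 */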
static void netvsc_send_table(struct hv_device *hdev,
			      struct vmpacket_descriptor *vmpkt)
{
	struct netvsc_device *nvscdev;
	struct net_device *ndev;
	struct nvsp_message *nvmsg;
	int i;
	u32 count, *tab;

	nvscdev = get_outbound_net_device(hdev);
	if (!nvscdev)
		return;
	ndev = nvscdev->ndev;

	nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
					(vmpkt->offset8 << 3));

	if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
		return;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		nvscdev->send_table[i] = tab[i];
}

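/*
 * Main receive-side callback, run for every host-to-guest interrupt on
 * a channel.  Packets are drained from the ring with
 * vmbus_recvpacket_raw() into a per-channel scratch buffer; if a packet
 * is larger than that buffer (-ENOBUFS), a temporary buffer is
 * allocated for it and freed once the loop exits.
 */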
void netvsc_channel_cb(void *context)
{
	int ret;
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct hv_device *device;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
	struct net_device *ndev;

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = net_device->ndev;
	buffer = get_per_channel_state(channel);

	do {
		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				switch (desc->type) {
				case VM_PKT_COMP:
					netvsc_send_completion(net_device,
							       device, desc);
					break;

				case VM_PKT_DATA_USING_XFER_PAGES:
					netvsc_receive(net_device, channel,
						       device, desc);
					break;

				case VM_PKT_DATA_INBAND:
					netvsc_send_table(device, desc);
					break;

				default:
					netdev_err(ndev,
						   "unhandled packet type %d, "
						   "tid %llx len %d\n",
						   desc->type, request_id,
						   bytes_recvd);
					break;
				}

			} else {
				/*
				 * We are done for this pass.
				 */
				break;
			}

		} else if (ret == -ENOBUFS) {
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				netdev_err(ndev,
					   "unable to allocate buffer of size "
					   "(%d)!!\n", bytes_recvd);
				break;
			}

			bufferlen = bytes_recvd;
		}
	} while (1);

	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);
	return;
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int ring_size =
		((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev;

	net_device = alloc_net_device(device);
	if (!net_device) {
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->ring_size = ring_size;

	/*
	 * Coming into this function, struct net_device * is
	 * registered as the driver private data.
	 * In alloc_net_device(), we register struct netvsc_device *
	 * as the driver private data and stash away struct net_device *
	 * in struct netvsc_device *.
	 */
	ndev = net_device->ndev;

	/* Initialize the NetVSC channel extension */
	init_completion(&net_device->channel_init_wait);

	set_per_channel_state(device->channel, net_device->cb_buffer);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device->channel);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully\n");

	net_device->chn_table[0] = device->channel;

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	kfree(net_device);

	return ret;
}