/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"


static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

	init_waitqueue_head(&net_device->wait_drain);
	net_device->start_remove = false;
	net_device->destroy = false;
	net_device->dev = device;
	net_device->ndev = ndev;

	hv_set_drvdata(device, net_device);
	return net_device;
}

static void free_netvsc_device(struct netvsc_device *nvdev)
{
	kfree(nvdev->cb_buffer);
	kfree(nvdev);
}

static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);
	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);

	if (!net_device)
		goto get_in_err;

	if (net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0)
		net_device = NULL;

get_in_err:
	return net_device;
}

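/*
 * Free the receive and send buffers previously shared with the host.
 * Teardown is ordered: revoke each buffer with NetVSP first, then tear
 * down its GPADL, and only then release the guest memory.  On failure
 * we return early and leak, as the comments below note, rather than
 * risk the host touching freed pages.
 */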
static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = net_device->ndev;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg), therefore we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
			revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * SendSendBufferComplete msg (ie sent
	 * NvspMessage1TypeSendSendBuffer msg), therefore we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id = 0;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return ret;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);

	return ret;
}

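/*
 * Allocate the receive and send buffers and share them with the host:
 * establish a GPADL for each, then hand the handle to NetVSP via
 * NVSP_MSG1_TYPE_SEND_RECV_BUF / NVSP_MSG1_TYPE_SEND_SEND_BUF.  The
 * completion messages report back how each buffer is sectioned.
 */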
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	int t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	net_device->recv_buf = vzalloc(net_device->recv_buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}


	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.
		send_recv_buf.gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);


	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */

	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

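	/*
	 * Now setup the send buffer.  Unlike the receive buffer, whose
	 * sectioning is dictated by the host above, the send buffer is
	 * carved into equal-size sections by the guest, using the section
	 * size NetVSP returns, with a local bitmap tracking free sections.
	 */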
	net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = 0;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	dev_info(&device->device, "Send section size: %d, Section count:%d\n",
		 net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map =
		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(net_device);

exit:
	return ret;
}

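/*
 * Negotiate NVSP protocol version
 *
 * Propose a single version to the host (min_protocol_ver ==
 * max_protocol_ver == nvsp_ver) and wait for the INIT_COMPLETE
 * response.  For NVSPv2 and later, the current MTU and the 802.1Q
 * capability are also reported via SEND_NDIS_CONFIG.
 */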
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	int ret, t;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);

	if (t == 0)
		return -ETIMEDOUT;

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 only: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

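/*
 * Bring up the NVSP session: negotiate the newest protocol version
 * both ends support (walking ver_list from newest to oldest), report
 * our NDIS version, then size and post the receive and send buffers.
 */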
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	struct net_device *ndev;
	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i, num_ver = 4; /* number of different NVSP versions */

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = num_ver - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_buf(net_device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	unsigned long flags;

	net_device = hv_get_drvdata(device);

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	if (net_device->sub_cb_buf)
		vfree(net_device->sub_cb_buf);

	free_netvsc_device(net_device);
	return 0;
}

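/*
 * Transmit flow control: netvsc_send() stops a queue when the free
 * space in its channel's outbound ring drops below LOWATER (percent),
 * and the completion path wakes it again once free space rises above
 * HIWATER or the queue has drained completely.
 */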
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev;
	u32 send_index;

	ndev = net_device->ndev;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		if (nvsc_packet) {
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = nvsc_packet->channel;
			nvsc_packet->send_completion(nvsc_packet->
						     send_completion_ctx);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends = atomic_dec_return(&net_device->
						queue_sends[q_idx]);

		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
	} else {
		netdev_err(ndev, "Unknown send completion packet type- "
			   "%d received!!\n", nvsp_packet->hdr.msg_type);
	}
}

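/*
 * Claim a free section in the send buffer: scan the allocation bitmap
 * one word at a time, skip full words, and atomically test-and-set the
 * first zero bit found.  Returns NETVSC_INVALID_INDEX when no section
 * is free, in which case netvsc_send() falls back to sending the page
 * buffers directly.
 */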
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i;
	int prev_val;

	for (i = 0; i < max_words; i++) {
		if (!~(map_addr[i]))
			continue;
		index = ffz(map_addr[i]);
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val)
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	}
	return ret_val;
}

u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
			    unsigned int section_index,
			    struct hv_netvsc_packet *packet)
{
	char *start = net_device->send_buf;
	char *dest = (start + (section_index * net_device->send_section_size));
	int i;
	u32 msg_size = 0;

	/* Linearize the scattered page buffers into the claimed section */
	for (i = 0; i < packet->page_buf_cnt; i++) {
		char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
		u32 offset = packet->page_buf[i].offset;
		u32 len = packet->page_buf[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}
	return msg_size;
}

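/*
 * Transmit one RNDIS packet.  A packet smaller than one send section
 * is copied into a free send-buffer slot so the host can read it from
 * the already-established GPADL; anything larger, or any packet when
 * no slot is free, goes out as a page-buffer (GPA list) send.
 */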
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct nvsp_message sendMessage;
	struct net_device *ndev;
	struct vmbus_channel *out_channel = NULL;
	u64 req_id;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	u32 msg_size = 0;
	struct sk_buff *skb;
	u16 q_idx = packet->q_idx;


	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	/* Attempt to send via sendbuf */
	if (packet->total_data_buflen < net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			msg_size = netvsc_copy_to_send_buf(net_device,
							   section_index,
							   packet);
			skb = (struct sk_buff *)
			      (unsigned long)packet->send_completion_tid;
			if (skb)
				dev_kfree_skb_any(skb);
			packet->page_buf_cnt = 0;
		}
	}
	packet->send_buf_index = section_index;


	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		section_index;
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;

	if (packet->send_completion)
		req_id = (ulong)packet;
	else
		req_id = 0;

	out_channel = net_device->chn_table[packet->q_idx];
	if (out_channel == NULL)
		out_channel = device->channel;
	packet->channel = out_channel;

	if (packet->page_buf_cnt) {
		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  packet->page_buf,
						  packet->page_buf_cnt,
						  &sendMessage,
						  sizeof(struct nvsp_message),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel, &sendMessage,
				       sizeof(struct nvsp_message),
				       req_id,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
		    RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(
					    ndev, q_idx));

			if (atomic_read(&net_device->
					queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(netdev_get_tx_queue(
				    ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

static void netvsc_send_recv_completion(struct hv_device *device,
					struct vmbus_channel *channel,
					struct netvsc_device *net_device,
					u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;
	struct net_device *ndev;

	ndev = net_device->ndev;

	recvcompMessage.hdr.msg_type =
		NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
		/* no-op */
	} else if (ret == -EAGAIN) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		netdev_err(ndev, "unable to send receive completion pkt"
			   " (tid %llx)...retrying %d\n", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			netdev_err(ndev, "unable to send receive "
				   "completion pkt (tid %llx)...give up retrying\n",
				   transaction_id);
		}
	} else {
		netdev_err(ndev, "unable to send receive "
			   "completion pkt - %llx\n", transaction_id);
	}
}

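/*
 * Handle an inbound transfer-page packet.  Each range is one RNDIS
 * message (one Ethernet frame) at an offset within the shared receive
 * buffer; each is handed to the RNDIS filter in turn, and a single
 * receive completion carrying the aggregate status is sent back to
 * the host using the packet's transaction id.
 */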
static void netvsc_receive(struct netvsc_device *net_device,
			   struct vmbus_channel *channel,
			   struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	struct net_device *ndev;

	ndev = net_device->ndev;

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		netdev_err(ndev, "Unknown packet type received - %d\n",
			   packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		netdev_err(ndev, "Unknown nvsp packet type received-"
			   " %d\n", nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		netdev_err(ndev, "Invalid xfer page set id - "
			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		return;
	}

	count = vmxferpage_packet->range_cnt;
	netvsc_packet->device = device;
	netvsc_packet->channel = channel;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		/* Initialize the netvsc packet */
		netvsc_packet->status = NVSP_STAT_SUCCESS;
		netvsc_packet->data = (void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset);
		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		rndis_filter_receive(device, netvsc_packet);

		if (netvsc_packet->status != NVSP_STAT_SUCCESS)
			status = NVSP_STAT_FAIL;
	}

	netvsc_send_recv_completion(device, channel, net_device,
				    vmxferpage_packet->d.trans_id, status);
}


static void netvsc_send_table(struct hv_device *hdev,
			      struct vmpacket_descriptor *vmpkt)
{
	struct netvsc_device *nvscdev;
	struct net_device *ndev;
	struct nvsp_message *nvmsg;
	int i;
	u32 count, *tab;

	nvscdev = get_outbound_net_device(hdev);
	if (!nvscdev)
		return;
	ndev = nvscdev->ndev;

	nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
					(vmpkt->offset8 << 3));

	if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
		return;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		nvscdev->send_table[i] = tab[i];
}

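/*
 * Per-channel VMBus callback: drain the inbound ring buffer and
 * dispatch each descriptor to the completion, receive-path, or inband
 * (indirection-table) handler.  A packet larger than the preallocated
 * per-channel buffer is re-read into a temporary GFP_ATOMIC buffer,
 * which is freed once the loop exits.
 */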
void netvsc_channel_cb(void *context)
{
	int ret;
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct hv_device *device;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
	struct net_device *ndev;

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = net_device->ndev;
	buffer = get_per_channel_state(channel);

	do {
		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				switch (desc->type) {
				case VM_PKT_COMP:
					netvsc_send_completion(net_device,
							       device, desc);
					break;

				case VM_PKT_DATA_USING_XFER_PAGES:
					netvsc_receive(net_device, channel,
						       device, desc);
					break;

				case VM_PKT_DATA_INBAND:
					netvsc_send_table(device, desc);
					break;

				default:
					netdev_err(ndev,
						   "unhandled packet type %d, "
						   "tid %llx len %d\n",
						   desc->type, request_id,
						   bytes_recvd);
					break;
				}

			} else {
				/*
				 * We are done for this pass.
				 */
				break;
			}

		} else if (ret == -ENOBUFS) {
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				netdev_err(ndev,
					   "unable to allocate buffer of size "
					   "(%d)!!\n", bytes_recvd);
				break;
			}

			bufferlen = bytes_recvd;
		}
	} while (1);

	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);
	return;
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int ring_size =
		((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev;

	net_device = alloc_net_device(device);
	if (!net_device)
		return -ENOMEM;

	net_device->ring_size = ring_size;

	/*
	 * Coming into this function, struct net_device * is
	 * registered as the driver private data.
	 * In alloc_net_device(), we register struct netvsc_device *
	 * as the driver private data and stash away struct net_device *
	 * in struct netvsc_device *.
	 */
	ndev = net_device->ndev;

	/* Initialize the NetVSC channel extension */
	init_completion(&net_device->channel_init_wait);

	set_per_channel_state(device->channel, net_device->cb_buffer);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device->channel);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully\n");

	net_device->chn_table[0] = device->channel;

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	free_netvsc_device(net_device);

	return ret;
}