1 /* 2 * Copyright (c) 2009, Microsoft Corporation. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program; if not, see <http://www.gnu.org/licenses/>. 15 * 16 * Authors: 17 * Haiyang Zhang <haiyangz@microsoft.com> 18 * Hank Janssen <hjanssen@microsoft.com> 19 */ 20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 21 22 #include <linux/kernel.h> 23 #include <linux/sched.h> 24 #include <linux/wait.h> 25 #include <linux/mm.h> 26 #include <linux/delay.h> 27 #include <linux/io.h> 28 #include <linux/slab.h> 29 #include <linux/netdevice.h> 30 #include <linux/if_ether.h> 31 #include <asm/sync_bitops.h> 32 33 #include "hyperv_net.h" 34 35 36 static struct netvsc_device *alloc_net_device(struct hv_device *device) 37 { 38 struct netvsc_device *net_device; 39 struct net_device *ndev = hv_get_drvdata(device); 40 int i; 41 42 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); 43 if (!net_device) 44 return NULL; 45 46 net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL); 47 if (!net_device->cb_buffer) { 48 kfree(net_device); 49 return NULL; 50 } 51 52 init_waitqueue_head(&net_device->wait_drain); 53 net_device->start_remove = false; 54 net_device->destroy = false; 55 net_device->dev = device; 56 net_device->ndev = ndev; 57 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 58 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 59 60 for (i = 0; i < num_online_cpus(); i++) 61 spin_lock_init(&net_device->msd[i].lock); 62 63 hv_set_drvdata(device, net_device); 64 return net_device; 65 } 66 67 static void free_netvsc_device(struct netvsc_device *nvdev) 68 { 69 kfree(nvdev->cb_buffer); 70 kfree(nvdev); 71 } 72 73 static struct netvsc_device *get_outbound_net_device(struct hv_device *device) 74 { 75 struct netvsc_device *net_device; 76 77 net_device = hv_get_drvdata(device); 78 if (net_device && net_device->destroy) 79 net_device = NULL; 80 81 return net_device; 82 } 83 84 static struct netvsc_device *get_inbound_net_device(struct hv_device *device) 85 { 86 struct netvsc_device *net_device; 87 88 net_device = hv_get_drvdata(device); 89 90 if (!net_device) 91 goto get_in_err; 92 93 if (net_device->destroy && 94 atomic_read(&net_device->num_outstanding_sends) == 0) 95 net_device = NULL; 96 97 get_in_err: 98 return net_device; 99 } 100 101 102 static int netvsc_destroy_buf(struct netvsc_device *net_device) 103 { 104 struct nvsp_message *revoke_packet; 105 int ret = 0; 106 struct net_device *ndev = net_device->ndev; 107 108 /* 109 * If we got a section count, it means we received a 110 * SendReceiveBufferComplete msg (ie sent 111 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need 112 * to send a revoke msg here 113 */ 114 if (net_device->recv_section_cnt) { 115 /* Send the revoke receive buffer */ 116 revoke_packet = &net_device->revoke_packet; 117 memset(revoke_packet, 0, sizeof(struct nvsp_message)); 118 119 revoke_packet->hdr.msg_type = 120 NVSP_MSG1_TYPE_REVOKE_RECV_BUF; 121 revoke_packet->msg.v1_msg. 122 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID; 123 124 ret = vmbus_sendpacket(net_device->dev->channel, 125 revoke_packet, 126 sizeof(struct nvsp_message), 127 (unsigned long)revoke_packet, 128 VM_PKT_DATA_INBAND, 0); 129 /* 130 * If we failed here, we might as well return and 131 * have a leak rather than continue and a bugchk 132 */ 133 if (ret != 0) { 134 netdev_err(ndev, "unable to send " 135 "revoke receive buffer to netvsp\n"); 136 return ret; 137 } 138 } 139 140 /* Teardown the gpadl on the vsp end */ 141 if (net_device->recv_buf_gpadl_handle) { 142 ret = vmbus_teardown_gpadl(net_device->dev->channel, 143 net_device->recv_buf_gpadl_handle); 144 145 /* If we failed here, we might as well return and have a leak 146 * rather than continue and a bugchk 147 */ 148 if (ret != 0) { 149 netdev_err(ndev, 150 "unable to teardown receive buffer's gpadl\n"); 151 return ret; 152 } 153 net_device->recv_buf_gpadl_handle = 0; 154 } 155 156 if (net_device->recv_buf) { 157 /* Free up the receive buffer */ 158 vfree(net_device->recv_buf); 159 net_device->recv_buf = NULL; 160 } 161 162 if (net_device->recv_section) { 163 net_device->recv_section_cnt = 0; 164 kfree(net_device->recv_section); 165 net_device->recv_section = NULL; 166 } 167 168 /* Deal with the send buffer we may have setup. 169 * If we got a send section size, it means we received a 170 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent 171 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need 172 * to send a revoke msg here 173 */ 174 if (net_device->send_section_size) { 175 /* Send the revoke receive buffer */ 176 revoke_packet = &net_device->revoke_packet; 177 memset(revoke_packet, 0, sizeof(struct nvsp_message)); 178 179 revoke_packet->hdr.msg_type = 180 NVSP_MSG1_TYPE_REVOKE_SEND_BUF; 181 revoke_packet->msg.v1_msg.revoke_send_buf.id = 182 NETVSC_SEND_BUFFER_ID; 183 184 ret = vmbus_sendpacket(net_device->dev->channel, 185 revoke_packet, 186 sizeof(struct nvsp_message), 187 (unsigned long)revoke_packet, 188 VM_PKT_DATA_INBAND, 0); 189 /* If we failed here, we might as well return and 190 * have a leak rather than continue and a bugchk 191 */ 192 if (ret != 0) { 193 netdev_err(ndev, "unable to send " 194 "revoke send buffer to netvsp\n"); 195 return ret; 196 } 197 } 198 /* Teardown the gpadl on the vsp end */ 199 if (net_device->send_buf_gpadl_handle) { 200 ret = vmbus_teardown_gpadl(net_device->dev->channel, 201 net_device->send_buf_gpadl_handle); 202 203 /* If we failed here, we might as well return and have a leak 204 * rather than continue and a bugchk 205 */ 206 if (ret != 0) { 207 netdev_err(ndev, 208 "unable to teardown send buffer's gpadl\n"); 209 return ret; 210 } 211 net_device->send_buf_gpadl_handle = 0; 212 } 213 if (net_device->send_buf) { 214 /* Free up the send buffer */ 215 vfree(net_device->send_buf); 216 net_device->send_buf = NULL; 217 } 218 kfree(net_device->send_section_map); 219 220 return ret; 221 } 222 223 static int netvsc_init_buf(struct hv_device *device) 224 { 225 int ret = 0; 226 unsigned long t; 227 struct netvsc_device *net_device; 228 struct nvsp_message *init_packet; 229 struct net_device *ndev; 230 int node; 231 232 net_device = get_outbound_net_device(device); 233 if (!net_device) 234 return -ENODEV; 235 ndev = net_device->ndev; 236 237 node = cpu_to_node(device->channel->target_cpu); 238 net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node); 239 if (!net_device->recv_buf) 240 net_device->recv_buf = vzalloc(net_device->recv_buf_size); 241 242 if (!net_device->recv_buf) { 243 netdev_err(ndev, "unable to allocate receive " 244 "buffer of size %d\n", net_device->recv_buf_size); 245 ret = -ENOMEM; 246 goto cleanup; 247 } 248 249 /* 250 * Establish the gpadl handle for this buffer on this 251 * channel. Note: This call uses the vmbus connection rather 252 * than the channel to establish the gpadl handle. 253 */ 254 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf, 255 net_device->recv_buf_size, 256 &net_device->recv_buf_gpadl_handle); 257 if (ret != 0) { 258 netdev_err(ndev, 259 "unable to establish receive buffer's gpadl\n"); 260 goto cleanup; 261 } 262 263 264 /* Notify the NetVsp of the gpadl handle */ 265 init_packet = &net_device->channel_init_pkt; 266 267 memset(init_packet, 0, sizeof(struct nvsp_message)); 268 269 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF; 270 init_packet->msg.v1_msg.send_recv_buf. 271 gpadl_handle = net_device->recv_buf_gpadl_handle; 272 init_packet->msg.v1_msg. 273 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID; 274 275 /* Send the gpadl notification request */ 276 ret = vmbus_sendpacket(device->channel, init_packet, 277 sizeof(struct nvsp_message), 278 (unsigned long)init_packet, 279 VM_PKT_DATA_INBAND, 280 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 281 if (ret != 0) { 282 netdev_err(ndev, 283 "unable to send receive buffer's gpadl to netvsp\n"); 284 goto cleanup; 285 } 286 287 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); 288 BUG_ON(t == 0); 289 290 291 /* Check the response */ 292 if (init_packet->msg.v1_msg. 293 send_recv_buf_complete.status != NVSP_STAT_SUCCESS) { 294 netdev_err(ndev, "Unable to complete receive buffer " 295 "initialization with NetVsp - status %d\n", 296 init_packet->msg.v1_msg. 297 send_recv_buf_complete.status); 298 ret = -EINVAL; 299 goto cleanup; 300 } 301 302 /* Parse the response */ 303 304 net_device->recv_section_cnt = init_packet->msg. 305 v1_msg.send_recv_buf_complete.num_sections; 306 307 net_device->recv_section = kmemdup( 308 init_packet->msg.v1_msg.send_recv_buf_complete.sections, 309 net_device->recv_section_cnt * 310 sizeof(struct nvsp_1_receive_buffer_section), 311 GFP_KERNEL); 312 if (net_device->recv_section == NULL) { 313 ret = -EINVAL; 314 goto cleanup; 315 } 316 317 /* 318 * For 1st release, there should only be 1 section that represents the 319 * entire receive buffer 320 */ 321 if (net_device->recv_section_cnt != 1 || 322 net_device->recv_section->offset != 0) { 323 ret = -EINVAL; 324 goto cleanup; 325 } 326 327 /* Now setup the send buffer. 328 */ 329 net_device->send_buf = vzalloc_node(net_device->send_buf_size, node); 330 if (!net_device->send_buf) 331 net_device->send_buf = vzalloc(net_device->send_buf_size); 332 if (!net_device->send_buf) { 333 netdev_err(ndev, "unable to allocate send " 334 "buffer of size %d\n", net_device->send_buf_size); 335 ret = -ENOMEM; 336 goto cleanup; 337 } 338 339 /* Establish the gpadl handle for this buffer on this 340 * channel. Note: This call uses the vmbus connection rather 341 * than the channel to establish the gpadl handle. 342 */ 343 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf, 344 net_device->send_buf_size, 345 &net_device->send_buf_gpadl_handle); 346 if (ret != 0) { 347 netdev_err(ndev, 348 "unable to establish send buffer's gpadl\n"); 349 goto cleanup; 350 } 351 352 /* Notify the NetVsp of the gpadl handle */ 353 init_packet = &net_device->channel_init_pkt; 354 memset(init_packet, 0, sizeof(struct nvsp_message)); 355 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF; 356 init_packet->msg.v1_msg.send_send_buf.gpadl_handle = 357 net_device->send_buf_gpadl_handle; 358 init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID; 359 360 /* Send the gpadl notification request */ 361 ret = vmbus_sendpacket(device->channel, init_packet, 362 sizeof(struct nvsp_message), 363 (unsigned long)init_packet, 364 VM_PKT_DATA_INBAND, 365 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 366 if (ret != 0) { 367 netdev_err(ndev, 368 "unable to send send buffer's gpadl to netvsp\n"); 369 goto cleanup; 370 } 371 372 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); 373 BUG_ON(t == 0); 374 375 /* Check the response */ 376 if (init_packet->msg.v1_msg. 377 send_send_buf_complete.status != NVSP_STAT_SUCCESS) { 378 netdev_err(ndev, "Unable to complete send buffer " 379 "initialization with NetVsp - status %d\n", 380 init_packet->msg.v1_msg. 381 send_send_buf_complete.status); 382 ret = -EINVAL; 383 goto cleanup; 384 } 385 386 /* Parse the response */ 387 net_device->send_section_size = init_packet->msg. 388 v1_msg.send_send_buf_complete.section_size; 389 390 /* Section count is simply the size divided by the section size. 391 */ 392 net_device->send_section_cnt = 393 net_device->send_buf_size/net_device->send_section_size; 394 395 dev_info(&device->device, "Send section size: %d, Section count:%d\n", 396 net_device->send_section_size, net_device->send_section_cnt); 397 398 /* Setup state for managing the send buffer. */ 399 net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt, 400 BITS_PER_LONG); 401 402 net_device->send_section_map = 403 kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL); 404 if (net_device->send_section_map == NULL) { 405 ret = -ENOMEM; 406 goto cleanup; 407 } 408 409 goto exit; 410 411 cleanup: 412 netvsc_destroy_buf(net_device); 413 414 exit: 415 return ret; 416 } 417 418 419 /* Negotiate NVSP protocol version */ 420 static int negotiate_nvsp_ver(struct hv_device *device, 421 struct netvsc_device *net_device, 422 struct nvsp_message *init_packet, 423 u32 nvsp_ver) 424 { 425 int ret; 426 unsigned long t; 427 428 memset(init_packet, 0, sizeof(struct nvsp_message)); 429 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT; 430 init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver; 431 init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver; 432 433 /* Send the init request */ 434 ret = vmbus_sendpacket(device->channel, init_packet, 435 sizeof(struct nvsp_message), 436 (unsigned long)init_packet, 437 VM_PKT_DATA_INBAND, 438 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 439 440 if (ret != 0) 441 return ret; 442 443 t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); 444 445 if (t == 0) 446 return -ETIMEDOUT; 447 448 if (init_packet->msg.init_msg.init_complete.status != 449 NVSP_STAT_SUCCESS) 450 return -EINVAL; 451 452 if (nvsp_ver == NVSP_PROTOCOL_VERSION_1) 453 return 0; 454 455 /* NVSPv2 only: Send NDIS config */ 456 memset(init_packet, 0, sizeof(struct nvsp_message)); 457 init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG; 458 init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu + 459 ETH_HLEN; 460 init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1; 461 462 ret = vmbus_sendpacket(device->channel, init_packet, 463 sizeof(struct nvsp_message), 464 (unsigned long)init_packet, 465 VM_PKT_DATA_INBAND, 0); 466 467 return ret; 468 } 469 470 static int netvsc_connect_vsp(struct hv_device *device) 471 { 472 int ret; 473 struct netvsc_device *net_device; 474 struct nvsp_message *init_packet; 475 int ndis_version; 476 struct net_device *ndev; 477 u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, 478 NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 }; 479 int i, num_ver = 4; /* number of different NVSP versions */ 480 481 net_device = get_outbound_net_device(device); 482 if (!net_device) 483 return -ENODEV; 484 ndev = net_device->ndev; 485 486 init_packet = &net_device->channel_init_pkt; 487 488 /* Negotiate the latest NVSP protocol supported */ 489 for (i = num_ver - 1; i >= 0; i--) 490 if (negotiate_nvsp_ver(device, net_device, init_packet, 491 ver_list[i]) == 0) { 492 net_device->nvsp_version = ver_list[i]; 493 break; 494 } 495 496 if (i < 0) { 497 ret = -EPROTO; 498 goto cleanup; 499 } 500 501 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version); 502 503 /* Send the ndis version */ 504 memset(init_packet, 0, sizeof(struct nvsp_message)); 505 506 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4) 507 ndis_version = 0x00060001; 508 else 509 ndis_version = 0x0006001e; 510 511 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER; 512 init_packet->msg.v1_msg. 513 send_ndis_ver.ndis_major_ver = 514 (ndis_version & 0xFFFF0000) >> 16; 515 init_packet->msg.v1_msg. 516 send_ndis_ver.ndis_minor_ver = 517 ndis_version & 0xFFFF; 518 519 /* Send the init request */ 520 ret = vmbus_sendpacket(device->channel, init_packet, 521 sizeof(struct nvsp_message), 522 (unsigned long)init_packet, 523 VM_PKT_DATA_INBAND, 0); 524 if (ret != 0) 525 goto cleanup; 526 527 /* Post the big receive buffer to NetVSP */ 528 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2) 529 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; 530 else 531 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; 532 net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE; 533 534 ret = netvsc_init_buf(device); 535 536 cleanup: 537 return ret; 538 } 539 540 static void netvsc_disconnect_vsp(struct netvsc_device *net_device) 541 { 542 netvsc_destroy_buf(net_device); 543 } 544 545 /* 546 * netvsc_device_remove - Callback when the root bus device is removed 547 */ 548 int netvsc_device_remove(struct hv_device *device) 549 { 550 struct netvsc_device *net_device; 551 unsigned long flags; 552 553 net_device = hv_get_drvdata(device); 554 555 netvsc_disconnect_vsp(net_device); 556 557 /* 558 * Since we have already drained, we don't need to busy wait 559 * as was done in final_release_stor_device() 560 * Note that we cannot set the ext pointer to NULL until 561 * we have drained - to drain the outgoing packets, we need to 562 * allow incoming packets. 563 */ 564 565 spin_lock_irqsave(&device->channel->inbound_lock, flags); 566 hv_set_drvdata(device, NULL); 567 spin_unlock_irqrestore(&device->channel->inbound_lock, flags); 568 569 /* 570 * At this point, no one should be accessing net_device 571 * except in here 572 */ 573 dev_notice(&device->device, "net device safe to remove\n"); 574 575 /* Now, we can close the channel safely */ 576 vmbus_close(device->channel); 577 578 /* Release all resources */ 579 vfree(net_device->sub_cb_buf); 580 free_netvsc_device(net_device); 581 return 0; 582 } 583 584 585 #define RING_AVAIL_PERCENT_HIWATER 20 586 #define RING_AVAIL_PERCENT_LOWATER 10 587 588 /* 589 * Get the percentage of available bytes to write in the ring. 590 * The return value is in range from 0 to 100. 591 */ 592 static inline u32 hv_ringbuf_avail_percent( 593 struct hv_ring_buffer_info *ring_info) 594 { 595 u32 avail_read, avail_write; 596 597 hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write); 598 599 return avail_write * 100 / ring_info->ring_datasize; 600 } 601 602 static inline void netvsc_free_send_slot(struct netvsc_device *net_device, 603 u32 index) 604 { 605 sync_change_bit(index, net_device->send_section_map); 606 } 607 608 static void netvsc_send_completion(struct netvsc_device *net_device, 609 struct hv_device *device, 610 struct vmpacket_descriptor *packet) 611 { 612 struct nvsp_message *nvsp_packet; 613 struct hv_netvsc_packet *nvsc_packet; 614 struct net_device *ndev; 615 u32 send_index; 616 617 ndev = net_device->ndev; 618 619 nvsp_packet = (struct nvsp_message *)((unsigned long)packet + 620 (packet->offset8 << 3)); 621 622 if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) || 623 (nvsp_packet->hdr.msg_type == 624 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) || 625 (nvsp_packet->hdr.msg_type == 626 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) || 627 (nvsp_packet->hdr.msg_type == 628 NVSP_MSG5_TYPE_SUBCHANNEL)) { 629 /* Copy the response back */ 630 memcpy(&net_device->channel_init_pkt, nvsp_packet, 631 sizeof(struct nvsp_message)); 632 complete(&net_device->channel_init_wait); 633 } else if (nvsp_packet->hdr.msg_type == 634 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { 635 int num_outstanding_sends; 636 u16 q_idx = 0; 637 struct vmbus_channel *channel = device->channel; 638 int queue_sends; 639 640 /* Get the send context */ 641 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long) 642 packet->trans_id; 643 644 /* Notify the layer above us */ 645 if (nvsc_packet) { 646 send_index = nvsc_packet->send_buf_index; 647 if (send_index != NETVSC_INVALID_INDEX) 648 netvsc_free_send_slot(net_device, send_index); 649 q_idx = nvsc_packet->q_idx; 650 channel = nvsc_packet->channel; 651 nvsc_packet->send_completion(nvsc_packet-> 652 send_completion_ctx); 653 } 654 655 num_outstanding_sends = 656 atomic_dec_return(&net_device->num_outstanding_sends); 657 queue_sends = atomic_dec_return(&net_device-> 658 queue_sends[q_idx]); 659 660 if (net_device->destroy && num_outstanding_sends == 0) 661 wake_up(&net_device->wait_drain); 662 663 if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && 664 !net_device->start_remove && 665 (hv_ringbuf_avail_percent(&channel->outbound) > 666 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) 667 netif_tx_wake_queue(netdev_get_tx_queue( 668 ndev, q_idx)); 669 } else { 670 netdev_err(ndev, "Unknown send completion packet type- " 671 "%d received!!\n", nvsp_packet->hdr.msg_type); 672 } 673 674 } 675 676 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) 677 { 678 unsigned long index; 679 u32 max_words = net_device->map_words; 680 unsigned long *map_addr = (unsigned long *)net_device->send_section_map; 681 u32 section_cnt = net_device->send_section_cnt; 682 int ret_val = NETVSC_INVALID_INDEX; 683 int i; 684 int prev_val; 685 686 for (i = 0; i < max_words; i++) { 687 if (!~(map_addr[i])) 688 continue; 689 index = ffz(map_addr[i]); 690 prev_val = sync_test_and_set_bit(index, &map_addr[i]); 691 if (prev_val) 692 continue; 693 if ((index + (i * BITS_PER_LONG)) >= section_cnt) 694 break; 695 ret_val = (index + (i * BITS_PER_LONG)); 696 break; 697 } 698 return ret_val; 699 } 700 701 static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, 702 unsigned int section_index, 703 u32 pend_size, 704 struct hv_netvsc_packet *packet) 705 { 706 char *start = net_device->send_buf; 707 char *dest = start + (section_index * net_device->send_section_size) 708 + pend_size; 709 int i; 710 u32 msg_size = 0; 711 u32 padding = 0; 712 u32 remain = packet->total_data_buflen % net_device->pkt_align; 713 u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt : 714 packet->page_buf_cnt; 715 716 /* Add padding */ 717 if (packet->is_data_pkt && packet->xmit_more && remain && 718 !packet->cp_partial) { 719 padding = net_device->pkt_align - remain; 720 packet->rndis_msg->msg_len += padding; 721 packet->total_data_buflen += padding; 722 } 723 724 for (i = 0; i < page_count; i++) { 725 char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT); 726 u32 offset = packet->page_buf[i].offset; 727 u32 len = packet->page_buf[i].len; 728 729 memcpy(dest, (src + offset), len); 730 msg_size += len; 731 dest += len; 732 } 733 734 if (padding) { 735 memset(dest, 0, padding); 736 msg_size += padding; 737 } 738 739 return msg_size; 740 } 741 742 static inline int netvsc_send_pkt( 743 struct hv_netvsc_packet *packet, 744 struct netvsc_device *net_device) 745 { 746 struct nvsp_message nvmsg; 747 struct vmbus_channel *out_channel = packet->channel; 748 u16 q_idx = packet->q_idx; 749 struct net_device *ndev = net_device->ndev; 750 u64 req_id; 751 int ret; 752 struct hv_page_buffer *pgbuf; 753 u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound); 754 755 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; 756 if (packet->is_data_pkt) { 757 /* 0 is RMC_DATA; */ 758 nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0; 759 } else { 760 /* 1 is RMC_CONTROL; */ 761 nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1; 762 } 763 764 nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index = 765 packet->send_buf_index; 766 if (packet->send_buf_index == NETVSC_INVALID_INDEX) 767 nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; 768 else 769 nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 770 packet->total_data_buflen; 771 772 if (packet->send_completion) 773 req_id = (ulong)packet; 774 else 775 req_id = 0; 776 777 if (out_channel->rescind) 778 return -ENODEV; 779 780 /* 781 * It is possible that once we successfully place this packet 782 * on the ringbuffer, we may stop the queue. In that case, we want 783 * to notify the host independent of the xmit_more flag. We don't 784 * need to be precise here; in the worst case we may signal the host 785 * unnecessarily. 786 */ 787 if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1)) 788 packet->xmit_more = false; 789 790 if (packet->page_buf_cnt) { 791 pgbuf = packet->cp_partial ? packet->page_buf + 792 packet->rmsg_pgcnt : packet->page_buf; 793 ret = vmbus_sendpacket_pagebuffer_ctl(out_channel, 794 pgbuf, 795 packet->page_buf_cnt, 796 &nvmsg, 797 sizeof(struct nvsp_message), 798 req_id, 799 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED, 800 !packet->xmit_more); 801 } else { 802 ret = vmbus_sendpacket_ctl(out_channel, &nvmsg, 803 sizeof(struct nvsp_message), 804 req_id, 805 VM_PKT_DATA_INBAND, 806 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED, 807 !packet->xmit_more); 808 } 809 810 if (ret == 0) { 811 atomic_inc(&net_device->num_outstanding_sends); 812 atomic_inc(&net_device->queue_sends[q_idx]); 813 814 if (ring_avail < RING_AVAIL_PERCENT_LOWATER) { 815 netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx)); 816 817 if (atomic_read(&net_device-> 818 queue_sends[q_idx]) < 1) 819 netif_tx_wake_queue(netdev_get_tx_queue( 820 ndev, q_idx)); 821 } 822 } else if (ret == -EAGAIN) { 823 netif_tx_stop_queue(netdev_get_tx_queue( 824 ndev, q_idx)); 825 if (atomic_read(&net_device->queue_sends[q_idx]) < 1) { 826 netif_tx_wake_queue(netdev_get_tx_queue( 827 ndev, q_idx)); 828 ret = -ENOSPC; 829 } 830 } else { 831 netdev_err(ndev, "Unable to send packet %p ret %d\n", 832 packet, ret); 833 } 834 835 return ret; 836 } 837 838 int netvsc_send(struct hv_device *device, 839 struct hv_netvsc_packet *packet) 840 { 841 struct netvsc_device *net_device; 842 int ret = 0, m_ret = 0; 843 struct vmbus_channel *out_channel; 844 u16 q_idx = packet->q_idx; 845 u32 pktlen = packet->total_data_buflen, msd_len = 0; 846 unsigned int section_index = NETVSC_INVALID_INDEX; 847 unsigned long flag; 848 struct multi_send_data *msdp; 849 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; 850 bool try_batch; 851 852 net_device = get_outbound_net_device(device); 853 if (!net_device) 854 return -ENODEV; 855 856 out_channel = net_device->chn_table[q_idx]; 857 if (!out_channel) { 858 out_channel = device->channel; 859 q_idx = 0; 860 packet->q_idx = 0; 861 } 862 packet->channel = out_channel; 863 packet->send_buf_index = NETVSC_INVALID_INDEX; 864 packet->cp_partial = false; 865 866 msdp = &net_device->msd[q_idx]; 867 868 /* batch packets in send buffer if possible */ 869 spin_lock_irqsave(&msdp->lock, flag); 870 if (msdp->pkt) 871 msd_len = msdp->pkt->total_data_buflen; 872 873 try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count < 874 net_device->max_pkt; 875 876 if (try_batch && msd_len + pktlen + net_device->pkt_align < 877 net_device->send_section_size) { 878 section_index = msdp->pkt->send_buf_index; 879 880 } else if (try_batch && msd_len + packet->rmsg_size < 881 net_device->send_section_size) { 882 section_index = msdp->pkt->send_buf_index; 883 packet->cp_partial = true; 884 885 } else if (packet->is_data_pkt && pktlen + net_device->pkt_align < 886 net_device->send_section_size) { 887 section_index = netvsc_get_next_send_section(net_device); 888 if (section_index != NETVSC_INVALID_INDEX) { 889 msd_send = msdp->pkt; 890 msdp->pkt = NULL; 891 msdp->count = 0; 892 msd_len = 0; 893 } 894 } 895 896 if (section_index != NETVSC_INVALID_INDEX) { 897 netvsc_copy_to_send_buf(net_device, 898 section_index, msd_len, 899 packet); 900 901 packet->send_buf_index = section_index; 902 903 if (packet->cp_partial) { 904 packet->page_buf_cnt -= packet->rmsg_pgcnt; 905 packet->total_data_buflen = msd_len + packet->rmsg_size; 906 } else { 907 packet->page_buf_cnt = 0; 908 packet->total_data_buflen += msd_len; 909 } 910 911 if (msdp->pkt) 912 netvsc_xmit_completion(msdp->pkt); 913 914 if (packet->xmit_more && !packet->cp_partial) { 915 msdp->pkt = packet; 916 msdp->count++; 917 } else { 918 cur_send = packet; 919 msdp->pkt = NULL; 920 msdp->count = 0; 921 } 922 } else { 923 msd_send = msdp->pkt; 924 msdp->pkt = NULL; 925 msdp->count = 0; 926 cur_send = packet; 927 } 928 929 spin_unlock_irqrestore(&msdp->lock, flag); 930 931 if (msd_send) { 932 m_ret = netvsc_send_pkt(msd_send, net_device); 933 934 if (m_ret != 0) { 935 netvsc_free_send_slot(net_device, 936 msd_send->send_buf_index); 937 netvsc_xmit_completion(msd_send); 938 } 939 } 940 941 if (cur_send) 942 ret = netvsc_send_pkt(cur_send, net_device); 943 944 if (ret != 0 && section_index != NETVSC_INVALID_INDEX) 945 netvsc_free_send_slot(net_device, section_index); 946 947 return ret; 948 } 949 950 static void netvsc_send_recv_completion(struct hv_device *device, 951 struct vmbus_channel *channel, 952 struct netvsc_device *net_device, 953 u64 transaction_id, u32 status) 954 { 955 struct nvsp_message recvcompMessage; 956 int retries = 0; 957 int ret; 958 struct net_device *ndev; 959 960 ndev = net_device->ndev; 961 962 recvcompMessage.hdr.msg_type = 963 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; 964 965 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status; 966 967 retry_send_cmplt: 968 /* Send the completion */ 969 ret = vmbus_sendpacket(channel, &recvcompMessage, 970 sizeof(struct nvsp_message), transaction_id, 971 VM_PKT_COMP, 0); 972 if (ret == 0) { 973 /* success */ 974 /* no-op */ 975 } else if (ret == -EAGAIN) { 976 /* no more room...wait a bit and attempt to retry 3 times */ 977 retries++; 978 netdev_err(ndev, "unable to send receive completion pkt" 979 " (tid %llx)...retrying %d\n", transaction_id, retries); 980 981 if (retries < 4) { 982 udelay(100); 983 goto retry_send_cmplt; 984 } else { 985 netdev_err(ndev, "unable to send receive " 986 "completion pkt (tid %llx)...give up retrying\n", 987 transaction_id); 988 } 989 } else { 990 netdev_err(ndev, "unable to send receive " 991 "completion pkt - %llx\n", transaction_id); 992 } 993 } 994 995 static void netvsc_receive(struct netvsc_device *net_device, 996 struct vmbus_channel *channel, 997 struct hv_device *device, 998 struct vmpacket_descriptor *packet) 999 { 1000 struct vmtransfer_page_packet_header *vmxferpage_packet; 1001 struct nvsp_message *nvsp_packet; 1002 struct hv_netvsc_packet nv_pkt; 1003 struct hv_netvsc_packet *netvsc_packet = &nv_pkt; 1004 u32 status = NVSP_STAT_SUCCESS; 1005 int i; 1006 int count = 0; 1007 struct net_device *ndev; 1008 1009 ndev = net_device->ndev; 1010 1011 /* 1012 * All inbound packets other than send completion should be xfer page 1013 * packet 1014 */ 1015 if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) { 1016 netdev_err(ndev, "Unknown packet type received - %d\n", 1017 packet->type); 1018 return; 1019 } 1020 1021 nvsp_packet = (struct nvsp_message *)((unsigned long)packet + 1022 (packet->offset8 << 3)); 1023 1024 /* Make sure this is a valid nvsp packet */ 1025 if (nvsp_packet->hdr.msg_type != 1026 NVSP_MSG1_TYPE_SEND_RNDIS_PKT) { 1027 netdev_err(ndev, "Unknown nvsp packet type received-" 1028 " %d\n", nvsp_packet->hdr.msg_type); 1029 return; 1030 } 1031 1032 vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet; 1033 1034 if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) { 1035 netdev_err(ndev, "Invalid xfer page set id - " 1036 "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID, 1037 vmxferpage_packet->xfer_pageset_id); 1038 return; 1039 } 1040 1041 count = vmxferpage_packet->range_cnt; 1042 netvsc_packet->channel = channel; 1043 1044 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ 1045 for (i = 0; i < count; i++) { 1046 /* Initialize the netvsc packet */ 1047 netvsc_packet->status = NVSP_STAT_SUCCESS; 1048 netvsc_packet->data = (void *)((unsigned long)net_device-> 1049 recv_buf + vmxferpage_packet->ranges[i].byte_offset); 1050 netvsc_packet->total_data_buflen = 1051 vmxferpage_packet->ranges[i].byte_count; 1052 1053 /* Pass it to the upper layer */ 1054 rndis_filter_receive(device, netvsc_packet); 1055 1056 if (netvsc_packet->status != NVSP_STAT_SUCCESS) 1057 status = NVSP_STAT_FAIL; 1058 } 1059 1060 netvsc_send_recv_completion(device, channel, net_device, 1061 vmxferpage_packet->d.trans_id, status); 1062 } 1063 1064 1065 static void netvsc_send_table(struct hv_device *hdev, 1066 struct vmpacket_descriptor *vmpkt) 1067 { 1068 struct netvsc_device *nvscdev; 1069 struct net_device *ndev; 1070 struct nvsp_message *nvmsg; 1071 int i; 1072 u32 count, *tab; 1073 1074 nvscdev = get_outbound_net_device(hdev); 1075 if (!nvscdev) 1076 return; 1077 ndev = nvscdev->ndev; 1078 1079 nvmsg = (struct nvsp_message *)((unsigned long)vmpkt + 1080 (vmpkt->offset8 << 3)); 1081 1082 if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE) 1083 return; 1084 1085 count = nvmsg->msg.v5_msg.send_table.count; 1086 if (count != VRSS_SEND_TAB_SIZE) { 1087 netdev_err(ndev, "Received wrong send-table size:%u\n", count); 1088 return; 1089 } 1090 1091 tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table + 1092 nvmsg->msg.v5_msg.send_table.offset); 1093 1094 for (i = 0; i < count; i++) 1095 nvscdev->send_table[i] = tab[i]; 1096 } 1097 1098 void netvsc_channel_cb(void *context) 1099 { 1100 int ret; 1101 struct vmbus_channel *channel = (struct vmbus_channel *)context; 1102 struct hv_device *device; 1103 struct netvsc_device *net_device; 1104 u32 bytes_recvd; 1105 u64 request_id; 1106 struct vmpacket_descriptor *desc; 1107 unsigned char *buffer; 1108 int bufferlen = NETVSC_PACKET_SIZE; 1109 struct net_device *ndev; 1110 1111 if (channel->primary_channel != NULL) 1112 device = channel->primary_channel->device_obj; 1113 else 1114 device = channel->device_obj; 1115 1116 net_device = get_inbound_net_device(device); 1117 if (!net_device) 1118 return; 1119 ndev = net_device->ndev; 1120 buffer = get_per_channel_state(channel); 1121 1122 do { 1123 ret = vmbus_recvpacket_raw(channel, buffer, bufferlen, 1124 &bytes_recvd, &request_id); 1125 if (ret == 0) { 1126 if (bytes_recvd > 0) { 1127 desc = (struct vmpacket_descriptor *)buffer; 1128 switch (desc->type) { 1129 case VM_PKT_COMP: 1130 netvsc_send_completion(net_device, 1131 device, desc); 1132 break; 1133 1134 case VM_PKT_DATA_USING_XFER_PAGES: 1135 netvsc_receive(net_device, channel, 1136 device, desc); 1137 break; 1138 1139 case VM_PKT_DATA_INBAND: 1140 netvsc_send_table(device, desc); 1141 break; 1142 1143 default: 1144 netdev_err(ndev, 1145 "unhandled packet type %d, " 1146 "tid %llx len %d\n", 1147 desc->type, request_id, 1148 bytes_recvd); 1149 break; 1150 } 1151 1152 } else { 1153 /* 1154 * We are done for this pass. 1155 */ 1156 break; 1157 } 1158 1159 } else if (ret == -ENOBUFS) { 1160 if (bufferlen > NETVSC_PACKET_SIZE) 1161 kfree(buffer); 1162 /* Handle large packet */ 1163 buffer = kmalloc(bytes_recvd, GFP_ATOMIC); 1164 if (buffer == NULL) { 1165 /* Try again next time around */ 1166 netdev_err(ndev, 1167 "unable to allocate buffer of size " 1168 "(%d)!!\n", bytes_recvd); 1169 break; 1170 } 1171 1172 bufferlen = bytes_recvd; 1173 } 1174 } while (1); 1175 1176 if (bufferlen > NETVSC_PACKET_SIZE) 1177 kfree(buffer); 1178 return; 1179 } 1180 1181 /* 1182 * netvsc_device_add - Callback when the device belonging to this 1183 * driver is added 1184 */ 1185 int netvsc_device_add(struct hv_device *device, void *additional_info) 1186 { 1187 int ret = 0; 1188 int ring_size = 1189 ((struct netvsc_device_info *)additional_info)->ring_size; 1190 struct netvsc_device *net_device; 1191 struct net_device *ndev; 1192 1193 net_device = alloc_net_device(device); 1194 if (!net_device) 1195 return -ENOMEM; 1196 1197 net_device->ring_size = ring_size; 1198 1199 /* 1200 * Coming into this function, struct net_device * is 1201 * registered as the driver private data. 1202 * In alloc_net_device(), we register struct netvsc_device * 1203 * as the driver private data and stash away struct net_device * 1204 * in struct netvsc_device *. 1205 */ 1206 ndev = net_device->ndev; 1207 1208 /* Add netvsc_device context to netvsc_device */ 1209 net_device->nd_ctx = netdev_priv(ndev); 1210 1211 /* Initialize the NetVSC channel extension */ 1212 init_completion(&net_device->channel_init_wait); 1213 1214 set_per_channel_state(device->channel, net_device->cb_buffer); 1215 1216 /* Open the channel */ 1217 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, 1218 ring_size * PAGE_SIZE, NULL, 0, 1219 netvsc_channel_cb, device->channel); 1220 1221 if (ret != 0) { 1222 netdev_err(ndev, "unable to open channel: %d\n", ret); 1223 goto cleanup; 1224 } 1225 1226 /* Channel is opened */ 1227 pr_info("hv_netvsc channel opened successfully\n"); 1228 1229 net_device->chn_table[0] = device->channel; 1230 1231 /* Connect with the NetVsp */ 1232 ret = netvsc_connect_vsp(device); 1233 if (ret != 0) { 1234 netdev_err(ndev, 1235 "unable to connect to NetVSP - %d\n", ret); 1236 goto close; 1237 } 1238 1239 return ret; 1240 1241 close: 1242 /* Now, we can close the channel safely */ 1243 vmbus_close(device->channel); 1244 1245 cleanup: 1246 free_netvsc_device(net_device); 1247 1248 return ret; 1249 } 1250