/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
static void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	if (channel->offermsg.monitor_allocated) {
		/* Each u32 represents 32 channels */
		sync_set_bit(channel->offermsg.child_relid & 31,
			(unsigned long *) vmbus_connection.send_int_page +
			(channel->offermsg.child_relid >> 5));

		/* Get the child to parent monitor page */
		monitorpage = vmbus_connection.monitor_pages[1];

		sync_set_bit(channel->monitor_bit,
			(unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);

	} else {
		vmbus_set_event(channel);
	}
}
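
/*
 * Illustrative example of the interrupt-page addressing above (not part
 * of the protocol headers): for child_relid == 37, the trigger is bit
 * (37 & 31) == 5 of 32-bit word (37 >> 5) == 1, i.e. bit 5 of the
 * second u32 in the send interrupt page.
 */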

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	void *in, *out;
	unsigned long flags;
	int ret, err = 0;
	unsigned long t;
	struct page *page;

	spin_lock_irqsave(&newchannel->lock, flags);
	if (newchannel->state == CHANNEL_OPEN_STATE) {
		newchannel->state = CHANNEL_OPENING_STATE;
	} else {
		spin_unlock_irqrestore(&newchannel->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&newchannel->lock, flags);

	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	/* Allocate the ring buffer */
	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
				GFP_KERNEL|__GFP_ZERO,
				get_order(send_ringbuffer_size +
					  recv_ringbuffer_size));

	if (!page)
		out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
					       get_order(send_ringbuffer_size +
							 recv_ringbuffer_size));
	else
		out = (void *)page_address(page);

	if (!out) {
		err = -ENOMEM;
		goto error0;
	}

	in = (void *)((unsigned long)out + send_ringbuffer_size);

	newchannel->ringbuffer_pages = out;
	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
					    recv_ringbuffer_size) >> PAGE_SHIFT;

	ret = hv_ringbuffer_init(
		&newchannel->outbound, out, send_ringbuffer_size);

	if (ret != 0) {
		err = ret;
		goto error0;
	}

	ret = hv_ringbuffer_init(
		&newchannel->inbound, in, recv_ringbuffer_size);
	if (ret != 0) {
		err = ret;
		goto error0;
	}

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	ret = vmbus_establish_gpadl(newchannel,
				    newchannel->outbound.ring_buffer,
				    send_ringbuffer_size +
				    recv_ringbuffer_size,
				    &newchannel->ringbuffer_gpadlhandle);

	if (ret != 0) {
		err = ret;
		goto error0;
	}

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			    sizeof(struct vmbus_channel_open_channel),
			    GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error_gpadl;
	}

	init_completion(&open_info->waitevent);

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
						     PAGE_SHIFT;
	open_msg->target_vp = newchannel->target_vp;

	if (userdatalen > MAX_USER_DEFINED_BYTES) {
		err = -EINVAL;
		goto error_gpadl;
	}

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel));

	if (ret != 0) {
		err = ret;
		goto error1;
	}

	t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
	if (t == 0) {
		err = -ETIMEDOUT;
		goto error1;
	}
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_gpadl;
	}

	newchannel->state = CHANNEL_OPENED_STATE;
	kfree(open_info);
	return 0;

error1:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_gpadl:
	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);

error0:
	free_pages((unsigned long)out,
		   get_order(send_ringbuffer_size + recv_ringbuffer_size));
	kfree(open_info);
	newchannel->state = CHANNEL_OPEN_STATE;
	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
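
/*
 * Usage sketch (hypothetical driver code; my_onchannelcallback and
 * my_probe are illustrative names, not part of this file). A driver
 * typically opens its channel from probe(), with each ring size a
 * multiple of PAGE_SIZE:
 *
 *	static void my_onchannelcallback(void *context)
 *	{
 *		struct vmbus_channel *chan = context;
 *
 *		... drain the inbound ring via vmbus_recvpacket() ...
 *	}
 *
 *	static int my_probe(struct hv_device *dev,
 *			    const struct hv_vmbus_device_id *dev_id)
 *	{
 *		return vmbus_open(dev->channel, 4 * PAGE_SIZE,
 *				  4 * PAGE_SIZE, NULL, 0,
 *				  my_onchannelcallback, dev->channel);
 *	}
 */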

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
			       struct vmbus_channel_msginfo **msginfo,
			       u32 *messagecount)
{
	int i;
	int pagecount;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = size >> PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
		*msginfo = msgheader;
		*messagecount = 1;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);

			if (!msgbody) {
				struct vmbus_channel_msginfo *pos = NULL;
				struct vmbus_channel_msginfo *tmp = NULL;
				/*
				 * Free up all the allocated messages.
				 */
				list_for_each_entry_safe(pos, tmp,
					&msgheader->submsglist,
					msglistentry) {

					list_del(&pos->msglistentry);
					kfree(pos);
				}

				goto nomem;
			}

			msgbody->msgsize = msgsize;
			(*messagecount)++;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit
			 * This is governed by the guest/host protocol and
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = slow_virt_to_phys(
					kbuffer + PAGE_SIZE * (pfnsum + i)) >>
					PAGE_SHIFT;

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

		*msginfo = msgheader;
		*messagecount = 1;
	}

	return 0;
nomem:
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}
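
/*
 * Sizing note (derived from create_gpadl_header() above): a GPADL
 * header message can carry at most
 * (MAX_SIZE_CHANNEL_MESSAGE - sizeof(struct vmbus_channel_gpadl_header)
 *  - sizeof(struct gpa_range)) / sizeof(u64) PFNs.  A buffer whose page
 * count fits in that budget is described by the header alone
 * (*messagecount == 1); a larger buffer spills its remaining PFNs into
 * CHANNELMSG_GPADL_BODY messages, each holding up to
 * (MAX_SIZE_CHANNEL_MESSAGE - sizeof(struct vmbus_channel_gpadl_body))
 * / sizeof(u64) additional PFNs.
 */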

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: the handle of the newly established GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo;
	u32 msgcount;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;

	next_gpadl_handle =
		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

	ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
	if (ret)
		return ret;

	init_completion(&msginfo->waitevent);

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);

	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo));
	if (ret != 0)
		goto cleanup;

	if (msgcount > 1) {
		list_for_each(curr, &msginfo->submsglist) {

			submsginfo = (struct vmbus_channel_msginfo *)curr;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)submsginfo->msg;

			gpadl_body->header.msgtype =
				CHANNELMSG_GPADL_BODY;
			gpadl_body->gpadl = next_gpadl_handle;

			ret = vmbus_post_msg(gpadl_body,
					     submsginfo->msgsize -
					     sizeof(*submsginfo));
			if (ret != 0)
				goto cleanup;

		}
	}
	wait_for_completion(&msginfo->waitevent);

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(msginfo);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_gpadl_teardown));

	if (ret)
		goto post_msg_err;

	wait_for_completion(&info->waitevent);

post_msg_err:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
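
/*
 * Usage sketch (hypothetical; buf and handle are illustrative names).
 * vmbus_establish_gpadl() is paired with vmbus_teardown_gpadl() on the
 * same handle once the host is done with the buffer:
 *
 *	u32 handle;
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	int ret;
 *
 *	ret = vmbus_establish_gpadl(channel, buf, 4 * PAGE_SIZE, &handle);
 *	if (!ret) {
 *		... pass "handle" to the host; use the buffer ...
 *		vmbus_teardown_gpadl(channel, handle);
 *	}
 *	free_pages((unsigned long)buf, 2);
 */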

static void reset_channel_cb(void *arg)
{
	struct vmbus_channel *channel = arg;

	channel->onchannel_callback = NULL;
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	struct tasklet_struct *tasklet;
	int ret;

	/*
	 * process_chn_event(), running in the tasklet, can race
	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
	 * the former is accessing channel->inbound.ring_buffer, the latter
	 * could be freeing the ring_buffer pages.
	 *
	 * To resolve the race, we can serialize them by disabling the
	 * tasklet when the latter is running here.
	 */
	tasklet = hv_context.event_dpc[channel->target_cpu];
	tasklet_disable(tasklet);

	/*
	 * In case a device driver's probe() fails (e.g.,
	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
	 * rescinded later (e.g., we dynamically disable an Integrated Service
	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
	 * here we should skip most of the below cleanup work.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EINVAL;
		goto out;
	}

	channel->state = CHANNEL_OPEN_STATE;
	channel->sc_creation_callback = NULL;
	/* Stop callback and cancel the timer asap */
	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu, reset_channel_cb,
					 channel, true);
	} else {
		reset_channel_cb(channel);
		put_cpu();
	}

	/* Send a closing message */

	msg = &channel->close_msg.msg;

	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));

	if (ret) {
		pr_err("Close failed: close post msg return is %d\n", ret);
		/*
		 * If we failed to post the close msg,
		 * it is perhaps better to leak memory.
		 */
		goto out;
	}

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ringbuffer_gpadlhandle) {
		ret = vmbus_teardown_gpadl(channel,
					   channel->ringbuffer_gpadlhandle);
		if (ret) {
			pr_err("Close failed: teardown gpadl return %d\n", ret);
			/*
			 * If we failed to teardown gpadl,
			 * it is perhaps better to leak memory.
			 */
			goto out;
		}
	}

	/* Cleanup the ring buffers for this channel */
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);

	free_pages((unsigned long)channel->ringbuffer_pages,
		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

out:
	tasklet_enable(tasklet);

	return ret;
}

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (channel->primary_channel != NULL) {
		/*
		 * We will only close sub-channels when
		 * the primary is closed.
		 */
		return;
	}
	/*
	 * Close all the sub-channels first and then close the
	 * primary channel.
	 */
	list_for_each_safe(cur, tmp, &channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;
		vmbus_close_internal(cur_channel);
	}
	/*
	 * Now close the primary.
	 */
	vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);
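
/*
 * Usage sketch (hypothetical; my_remove is an illustrative name):
 * vmbus_close() is the counterpart to vmbus_open() and is normally
 * called from a driver's remove() routine:
 *
 *	static int my_remove(struct hv_device *dev)
 *	{
 *		vmbus_close(dev->channel);
 *		return 0;
 *	}
 */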

int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u64 requestid,
			 enum vmbus_packet_type type, u32 flags, bool kick_q)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	int ret;
	bool signal = false;
	int num_vecs = ((bufferlen != 0) ? 3 : 1);

	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-bytes granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
				  &signal);

	/*
	 * Signalling the host is conditional on many factors:
	 * 1. The ring state changed from being empty to non-empty.
	 *    This is tracked by the variable "signal".
	 * 2. The variable kick_q tracks if more data will be placed
	 *    on the ring. We will not signal if more data is
	 *    to be placed.
	 *
	 * Based on the channel signal state, we will decide
	 * which signaling policy will be applied.
	 *
	 * If we cannot write to the ring-buffer; signal the host
	 * even if we may not have written anything. This is a rare
	 * enough condition that it should not matter.
	 */

	if (channel->signal_policy)
		signal = true;
	else
		kick_q = true;

	if (((ret == 0) && kick_q && signal) || (ret))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the buffer in bytes
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 *	  packet etc.
 *
 * Sends data in @buffer directly to hyper-v via the vmbus
 * This will send the data unparsed to hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u64 requestid,
		     enum vmbus_packet_type type, u32 flags)
{
	return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
				    type, flags, true);
}
EXPORT_SYMBOL(vmbus_sendpacket);
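
/*
 * Usage sketch (hypothetical; struct my_request is an illustrative
 * type). A typical inband send that asks the host for a completion:
 *
 *	struct my_request req = { ... };
 *	int ret;
 *
 *	ret = vmbus_sendpacket(channel, &req, sizeof(req),
 *			       (unsigned long)&req, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *
 * The requestid ((unsigned long)&req here) is echoed back in the
 * host's completion packet so responses can be matched to requests.
 */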

/*
 * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
				    struct hv_page_buffer pagebuffers[],
				    u32 pagecount, void *buffer, u32 bufferlen,
				    u64 requestid,
				    u32 flags,
				    bool kick_q)
{
	int ret;
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
		   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
		    sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = flags;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	/*
	 * Signalling the host is conditional on many factors:
	 * 1. The ring state changed from being empty to non-empty.
	 *    This is tracked by the variable "signal".
	 * 2. The variable kick_q tracks if more data will be placed
	 *    on the ring. We will not signal if more data is
	 *    to be placed.
	 *
	 * Based on the channel signal state, we will decide
	 * which signaling policy will be applied.
	 *
	 * If we cannot write to the ring-buffer; signal the host
	 * even if we may not have written anything. This is a rare
	 * enough condition that it should not matter.
	 */

	if (channel->signal_policy)
		signal = true;
	else
		kick_q = true;

	if (((ret == 0) && kick_q && signal) || (ret))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				struct hv_page_buffer pagebuffers[],
				u32 pagecount, void *buffer, u32 bufferlen,
				u64 requestid)
{
	u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
					       buffer, bufferlen, requestid,
					       flags, true);

}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
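
/*
 * Usage sketch (hypothetical; data, data_len, hdr and req_id are
 * illustrative names). Describing a single page of payload alongside
 * a small inband header:
 *
 *	struct hv_page_buffer pb[1];
 *	int ret;
 *
 *	pb[0].pfn = virt_to_phys(data) >> PAGE_SHIFT;
 *	pb[0].offset = offset_in_page(data);
 *	pb[0].len = data_len;
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, pb, 1,
 *					  &hdr, sizeof(hdr), req_id);
 */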

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
			      struct vmbus_packet_mpb_array *desc,
			      u32 desc_size,
			      void *buffer, u32 bufferlen, u64 requestid)
{
	int ret;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;

	packetlen = desc_size + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
	desc->length8 = (u16)(packetlen_aligned >> 3);
	desc->transactionid = requestid;
	desc->rangecount = 1;

	bufferlist[0].iov_base = desc;
	bufferlist[0].iov_len = desc_size;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
				     struct hv_multipage_buffer *multi_pagebuffer,
				     void *buffer, u32 bufferlen, u64 requestid)
{
	int ret;
	struct vmbus_channel_packet_multipage_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;
	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);

	if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_multipage_buffer is
	 * the largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
		   ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
		    sizeof(u64));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = 1;

	desc.range.len = multi_pagebuffer->len;
	desc.range.offset = multi_pagebuffer->offset;

	memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
	       pfncount * sizeof(u64));

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
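
/*
 * Worked example for the pfncount computation above (assuming
 * PAGE_SIZE == 0x1000): NUM_PAGES_SPANNED(0xf00, 0x300) == 2, because
 * the 0x300-byte range starting at offset 0xf00 straddles one page
 * boundary and therefore needs two PFNs in the pfn_array.
 */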

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into Buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
		   bool raw)
{
	int ret;
	bool signal = false;

	ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
				 buffer_actual_len, requestid, &signal, raw);

	if (signal)
		vmbus_setevent(channel);

	return ret;
}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u32 *buffer_actual_len,
		     u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u32 *buffer_actual_len,
			 u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
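
/*
 * Usage sketch (hypothetical; my_buf and my_buflen are illustrative
 * names): draining packets from a channel callback:
 *
 *	u32 recvlen;
 *	u64 reqid;
 *
 *	while (vmbus_recvpacket(channel, my_buf, my_buflen,
 *				&recvlen, &reqid) == 0 && recvlen > 0) {
 *		... process recvlen bytes of my_buf; reqid carries the
 *		    sender's requestid ...
 *	}
 */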