/*-
 * Copyright (c) 2014 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Author: Sainath Varanasi.
 * Date:   4/2012
 * Email:  bsdic@microsoft.com
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/reboot.h>
#include <sys/lock.h>
#include <sys/taskqueue.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/un.h>
#include <sys/endian.h>
#include <sys/_null.h>
#include <sys/signal.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <net/if_arp.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/netvsc/hv_net_vsc.h>

#include "unicode.h"
#include "hv_kvp.h"

/* hv_kvp defines */
#define BUFFERSIZE      sizeof(struct hv_kvp_msg)
#define KVP_SUCCESS     0
#define KVP_ERROR       1
#define kvp_hdr         hdr.kvp_hdr

/* hv_kvp debug control */
static int hv_kvp_log = 0;
SYSCTL_INT(_dev, OID_AUTO, hv_kvp_log, CTLFLAG_RW, &hv_kvp_log, 0,
    "hv_kvp log");

#define hv_kvp_log_error(...) do {                              \
        if (hv_kvp_log > 0)                                     \
                log(LOG_ERR, "hv_kvp: " __VA_ARGS__);           \
} while (0)

#define hv_kvp_log_info(...) do {                               \
        if (hv_kvp_log > 1)                                     \
                log(LOG_INFO, "hv_kvp: " __VA_ARGS__);          \
} while (0)
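
/*
 * Both log macros are compiled in unconditionally; logging is enabled
 * at run time through the dev.hv_kvp_log sysctl (e.g.
 * "sysctl dev.hv_kvp_log=2" enables both error and informational
 * messages).
 */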

/* character device prototypes */
static d_open_t         hv_kvp_dev_open;
static d_close_t        hv_kvp_dev_close;
static d_read_t         hv_kvp_dev_daemon_read;
static d_write_t        hv_kvp_dev_daemon_write;
static d_poll_t         hv_kvp_dev_daemon_poll;

/* hv_kvp prototypes */
static int      hv_kvp_req_in_progress(void);
static void     hv_kvp_transaction_init(uint32_t, hv_vmbus_channel *, uint64_t, uint8_t *);
static void     hv_kvp_send_msg_to_daemon(void);
static void     hv_kvp_process_request(void *context);

/* hv_kvp character device structure */
static struct cdevsw hv_kvp_cdevsw =
{
        .d_version      = D_VERSION,
        .d_open         = hv_kvp_dev_open,
        .d_close        = hv_kvp_dev_close,
        .d_read         = hv_kvp_dev_daemon_read,
        .d_write        = hv_kvp_dev_daemon_write,
        .d_poll         = hv_kvp_dev_daemon_poll,
        .d_name         = "hv_kvp_dev",
};
static struct cdev *hv_kvp_dev;
static struct hv_kvp_msg *hv_kvp_dev_buf;
struct proc *daemon_task;

static struct selinfo hv_kvp_selinfo;

/*
 * Global state to track and synchronize multiple
 * KVP transaction requests from the host.
 */
static struct {

        /* Pre-allocated work item for queue */
        hv_work_item            work_item;

        /*
         * Unless specified otherwise, the pending mutex should be
         * used to alter the values of the following parameters:
         * 1. req_in_progress
         * 2. req_timed_out
         * 3. pending_reqs
         */
        struct mtx              pending_mutex;

        /* To track if transaction is active or not */
        boolean_t               req_in_progress;
        /* Tracks if daemon did not reply back in time */
        boolean_t               req_timed_out;
        /* Tracks if daemon is serving a request currently */
        boolean_t               daemon_busy;
        /* Count of KVP requests from Hyper-V. */
        uint64_t                pending_reqs;

        /* Length of host message */
        uint32_t                host_msg_len;

        /* Pointer to channel */
        hv_vmbus_channel        *channelp;

        /* Host message id */
        uint64_t                host_msg_id;

        /* Current kvp message from the host */
        struct hv_kvp_msg       *host_kvp_msg;

        /* Current kvp message for daemon */
        struct hv_kvp_msg       daemon_kvp_msg;

        /* Receive buffer for communicating with the host */
        uint8_t                 *rcv_buf;

        /* Device semaphore to control communication */
        struct sema             dev_sema;

        /* Indicates if daemon registered with driver */
        boolean_t               register_done;

        /* Character device status */
        boolean_t               dev_accessed;
} kvp_globals;

/* global vars */
MALLOC_DECLARE(M_HV_KVP_DEV_BUF);
MALLOC_DEFINE(M_HV_KVP_DEV_BUF, "hv_kvp_dev buffer", "buffer for hv_kvp_dev module");

/*
 * hv_kvp low level functions
 */

/*
 * Check if a kvp transaction is in progress
 */
static int
hv_kvp_req_in_progress(void)
{

        return (kvp_globals.req_in_progress);
}


/*
 * This routine is called whenever a message is received from the host
 */
static void
hv_kvp_transaction_init(uint32_t rcv_len, hv_vmbus_channel *rcv_channel,
                        uint64_t request_id, uint8_t *rcv_buf)
{

        /* Store all the relevant message details in the global structure */
        /* Do not need to use mutex for req_in_progress here */
        kvp_globals.req_in_progress = true;
        kvp_globals.host_msg_len = rcv_len;
        kvp_globals.channelp = rcv_channel;
        kvp_globals.host_msg_id = request_id;
        kvp_globals.rcv_buf = rcv_buf;
        kvp_globals.host_kvp_msg = (struct hv_kvp_msg *)&rcv_buf[
            sizeof(struct hv_vmbus_pipe_hdr) +
            sizeof(struct hv_vmbus_icmsg_hdr)];
}


/*
 * hv_kvp - version negotiation function
 */
static void
hv_kvp_negotiate_version(struct hv_vmbus_icmsg_hdr *icmsghdrp,
                         struct hv_vmbus_icmsg_negotiate *negop,
                         uint8_t *buf)
{
        int icframe_vercnt;
        int icmsg_vercnt;

        icmsghdrp->icmsgsize = 0x10;

        negop = (struct hv_vmbus_icmsg_negotiate *)&buf[
            sizeof(struct hv_vmbus_pipe_hdr) +
            sizeof(struct hv_vmbus_icmsg_hdr)];
        icframe_vercnt = negop->icframe_vercnt;
        icmsg_vercnt = negop->icmsg_vercnt;

        /*
         * Select the framework version number we will support
         */
        if ((icframe_vercnt >= 2) && (negop->icversion_data[1].major == 3)) {
                icframe_vercnt = 3;
                if (icmsg_vercnt > 2)
                        icmsg_vercnt = 4;
                else
                        icmsg_vercnt = 3;
        } else {
                icframe_vercnt = 1;
                icmsg_vercnt = 1;
        }

        negop->icframe_vercnt = 1;
        negop->icmsg_vercnt = 1;
        negop->icversion_data[0].major = icframe_vercnt;
        negop->icversion_data[0].minor = 0;
        negop->icversion_data[1].major = icmsg_vercnt;
        negop->icversion_data[1].minor = 0;
}
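
/*
 * Example of the negotiation above: if the host offers at least two
 * framework versions and the second icversion_data entry has major
 * version 3, the guest selects framework 3 and message version 3 (or
 * 4 when more than two message versions are offered); otherwise both
 * fall back to 1.  The selected pair is returned through
 * icversion_data[0]/[1], while icframe_vercnt and icmsg_vercnt are
 * reset to 1, presumably to indicate that a single pair is being
 * offered back to the host.
 */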

/*
 * Convert ip related info in umsg from utf8 to utf16 and store in hmsg
 */
static int
hv_kvp_convert_utf8_ipinfo_to_utf16(struct hv_kvp_msg *umsg,
                                    struct hv_kvp_ip_msg *host_ip_msg)
{
        int err_ip, err_subnet, err_gway, err_dns, err_adap;
        int UNUSED_FLAG = 1;

        utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.ip_addr,
            MAX_IP_ADDR_SIZE,
            (char *)umsg->body.kvp_ip_val.ip_addr,
            strlen((char *)umsg->body.kvp_ip_val.ip_addr),
            UNUSED_FLAG,
            &err_ip);
        utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.sub_net,
            MAX_IP_ADDR_SIZE,
            (char *)umsg->body.kvp_ip_val.sub_net,
            strlen((char *)umsg->body.kvp_ip_val.sub_net),
            UNUSED_FLAG,
            &err_subnet);
        utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.gate_way,
            MAX_GATEWAY_SIZE,
            (char *)umsg->body.kvp_ip_val.gate_way,
            strlen((char *)umsg->body.kvp_ip_val.gate_way),
            UNUSED_FLAG,
            &err_gway);
        utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.dns_addr,
            MAX_IP_ADDR_SIZE,
            (char *)umsg->body.kvp_ip_val.dns_addr,
            strlen((char *)umsg->body.kvp_ip_val.dns_addr),
            UNUSED_FLAG,
            &err_dns);
        utf8_to_utf16((uint16_t *)host_ip_msg->kvp_ip_val.adapter_id,
            MAX_IP_ADDR_SIZE,
            (char *)umsg->body.kvp_ip_val.adapter_id,
            strlen((char *)umsg->body.kvp_ip_val.adapter_id),
            UNUSED_FLAG,
            &err_adap);

        host_ip_msg->kvp_ip_val.dhcp_enabled = umsg->body.kvp_ip_val.dhcp_enabled;
        host_ip_msg->kvp_ip_val.addr_family = umsg->body.kvp_ip_val.addr_family;

        return (err_ip | err_subnet | err_gway | err_dns | err_adap);
}


/*
 * Convert ip related info in hmsg from utf16 to utf8 and store in umsg
 */
static int
hv_kvp_convert_utf16_ipinfo_to_utf8(struct hv_kvp_ip_msg *host_ip_msg,
                                    struct hv_kvp_msg *umsg)
{
        int err_ip, err_subnet, err_gway, err_dns, err_adap;
        int UNUSED_FLAG = 1;
        int guid_index;
        struct hv_device *hv_dev;       /* GUID Data Structure */
        hn_softc_t *sc;                 /* hn softc structure */
        char if_name[4];
        unsigned char guid_instance[40];
        char *guid_data = NULL;
        char buf[39];

        struct guid_extract {
                char a1[2];
                char a2[2];
                char a3[2];
                char a4[2];
                char b1[2];
                char b2[2];
                char c1[2];
                char c2[2];
                char d[4];
                char e[12];
        };

        struct guid_extract *id;
        device_t *devs;
        int devcnt;

        /* IP Address */
        utf16_to_utf8((char *)umsg->body.kvp_ip_val.ip_addr,
            MAX_IP_ADDR_SIZE,
            (uint16_t *)host_ip_msg->kvp_ip_val.ip_addr,
            MAX_IP_ADDR_SIZE,
            UNUSED_FLAG,
            &err_ip);

        /* Adapter ID : GUID */
        utf16_to_utf8((char *)umsg->body.kvp_ip_val.adapter_id,
            MAX_ADAPTER_ID_SIZE,
            (uint16_t *)host_ip_msg->kvp_ip_val.adapter_id,
            MAX_ADAPTER_ID_SIZE,
            UNUSED_FLAG,
            &err_adap);

        if (devclass_get_devices(devclass_find("hn"), &devs, &devcnt) == 0) {
                for (devcnt = devcnt - 1; devcnt >= 0; devcnt--) {
                        sc = device_get_softc(devs[devcnt]);

                        /* Trying to find GUID of Network Device */
                        hv_dev = sc->hn_dev_obj;

                        for (guid_index = 0; guid_index < 16; guid_index++) {
                                sprintf(&guid_instance[guid_index * 2], "%02x",
                                    hv_dev->device_id.data[guid_index]);
                        }

                        guid_data = (char *)guid_instance;
                        id = (struct guid_extract *)guid_data;
                        snprintf(buf, sizeof(buf), "{%.2s%.2s%.2s%.2s-%.2s%.2s-%.2s%.2s-%.4s-%s}",
                            id->a4, id->a3, id->a2, id->a1,
                            id->b2, id->b1, id->c2, id->c1, id->d, id->e);
                        guid_data = NULL;
                        sprintf(if_name, "%s%d", "hn", device_get_unit(devs[devcnt]));

                        if (strncmp(buf, (char *)umsg->body.kvp_ip_val.adapter_id, 39) == 0) {
                                strcpy((char *)umsg->body.kvp_ip_val.adapter_id, if_name);
                                break;
                        }
                }
                free(devs, M_TEMP);
        }

        /* Address Family, DHCP, Subnet, Gateway, DNS */
        umsg->kvp_hdr.operation = host_ip_msg->operation;
        umsg->body.kvp_ip_val.addr_family = host_ip_msg->kvp_ip_val.addr_family;
        umsg->body.kvp_ip_val.dhcp_enabled = host_ip_msg->kvp_ip_val.dhcp_enabled;
        utf16_to_utf8((char *)umsg->body.kvp_ip_val.sub_net, MAX_IP_ADDR_SIZE,
            (uint16_t *)host_ip_msg->kvp_ip_val.sub_net,
            MAX_IP_ADDR_SIZE,
            UNUSED_FLAG,
            &err_subnet);

        utf16_to_utf8((char *)umsg->body.kvp_ip_val.gate_way, MAX_GATEWAY_SIZE,
            (uint16_t *)host_ip_msg->kvp_ip_val.gate_way,
            MAX_GATEWAY_SIZE,
            UNUSED_FLAG,
            &err_gway);

        utf16_to_utf8((char *)umsg->body.kvp_ip_val.dns_addr, MAX_IP_ADDR_SIZE,
            (uint16_t *)host_ip_msg->kvp_ip_val.dns_addr,
            MAX_IP_ADDR_SIZE,
            UNUSED_FLAG,
            &err_dns);

        return (err_ip | err_subnet | err_gway | err_dns | err_adap);
}

/*
 * Prepare a user kvp msg based on host kvp msg (utf16 to utf8).
 * Ensure that utf16_to_utf8 takes care of the additional string
 * terminating character.
 */
static void
hv_kvp_convert_hostmsg_to_usermsg(void)
{
        int utf_err = 0;
        uint32_t value_type;
        struct hv_kvp_ip_msg *host_ip_msg = (struct hv_kvp_ip_msg *)
            kvp_globals.host_kvp_msg;

        struct hv_kvp_msg *hmsg = kvp_globals.host_kvp_msg;
        struct hv_kvp_msg *umsg = &kvp_globals.daemon_kvp_msg;

        memset(umsg, 0, sizeof(struct hv_kvp_msg));

        umsg->kvp_hdr.operation = hmsg->kvp_hdr.operation;
        umsg->kvp_hdr.pool = hmsg->kvp_hdr.pool;

        switch (umsg->kvp_hdr.operation) {
        case HV_KVP_OP_SET_IP_INFO:
                hv_kvp_convert_utf16_ipinfo_to_utf8(host_ip_msg, umsg);
                break;

        case HV_KVP_OP_GET_IP_INFO:
                utf16_to_utf8((char *)umsg->body.kvp_ip_val.adapter_id,
                    MAX_ADAPTER_ID_SIZE,
                    (uint16_t *)host_ip_msg->kvp_ip_val.adapter_id,
                    MAX_ADAPTER_ID_SIZE, 1, &utf_err);

                umsg->body.kvp_ip_val.addr_family =
                    host_ip_msg->kvp_ip_val.addr_family;
                break;

        case HV_KVP_OP_SET:
                value_type = hmsg->body.kvp_set.data.value_type;

                switch (value_type) {
                case HV_REG_SZ:
                        umsg->body.kvp_set.data.value_size =
                            utf16_to_utf8(
                                (char *)umsg->body.kvp_set.data.msg_value.value,
                                HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1,
                                (uint16_t *)hmsg->body.kvp_set.data.msg_value.value,
                                hmsg->body.kvp_set.data.value_size,
                                1, &utf_err);
                        /* utf8 encoding */
                        umsg->body.kvp_set.data.value_size =
                            umsg->body.kvp_set.data.value_size / 2;
                        break;

                case HV_REG_U32:
                        umsg->body.kvp_set.data.value_size =
                            sprintf(umsg->body.kvp_set.data.msg_value.value, "%d",
                                hmsg->body.kvp_set.data.msg_value.value_u32) + 1;
                        break;

                case HV_REG_U64:
                        umsg->body.kvp_set.data.value_size =
                            sprintf(umsg->body.kvp_set.data.msg_value.value, "%llu",
                                (unsigned long long)
                                hmsg->body.kvp_set.data.msg_value.value_u64) + 1;
                        break;
                }

                umsg->body.kvp_set.data.key_size =
                    utf16_to_utf8(
                        umsg->body.kvp_set.data.key,
                        HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1,
                        (uint16_t *)hmsg->body.kvp_set.data.key,
                        hmsg->body.kvp_set.data.key_size,
                        1, &utf_err);

                /* utf8 encoding */
                umsg->body.kvp_set.data.key_size =
                    umsg->body.kvp_set.data.key_size / 2;
                break;

        case HV_KVP_OP_GET:
                umsg->body.kvp_get.data.key_size =
                    utf16_to_utf8(umsg->body.kvp_get.data.key,
                        HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1,
                        (uint16_t *)hmsg->body.kvp_get.data.key,
                        hmsg->body.kvp_get.data.key_size,
                        1, &utf_err);
                /* utf8 encoding */
                umsg->body.kvp_get.data.key_size =
                    umsg->body.kvp_get.data.key_size / 2;
                break;

        case HV_KVP_OP_DELETE:
                umsg->body.kvp_delete.key_size =
                    utf16_to_utf8(umsg->body.kvp_delete.key,
                        HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1,
                        (uint16_t *)hmsg->body.kvp_delete.key,
                        hmsg->body.kvp_delete.key_size,
                        1, &utf_err);
                /* utf8 encoding */
                umsg->body.kvp_delete.key_size =
                    umsg->body.kvp_delete.key_size / 2;
                break;

        case HV_KVP_OP_ENUMERATE:
                umsg->body.kvp_enum_data.index =
                    hmsg->body.kvp_enum_data.index;
                break;

        default:
                hv_kvp_log_info("%s: daemon_kvp_msg: invalid operation: %d\n",
                    __func__, umsg->kvp_hdr.operation);
        }
}

/*
 * Prepare a host kvp msg based on user kvp msg (utf8 to utf16)
 */
static int
hv_kvp_convert_usermsg_to_hostmsg(void)
{
        int hkey_len = 0, hvalue_len = 0, utf_err = 0;
        struct hv_kvp_exchg_msg_value *host_exchg_data;
        char *key_name, *value;

        struct hv_kvp_msg *umsg = &kvp_globals.daemon_kvp_msg;
        struct hv_kvp_msg *hmsg = kvp_globals.host_kvp_msg;
        struct hv_kvp_ip_msg *host_ip_msg = (struct hv_kvp_ip_msg *)hmsg;

        switch (hmsg->kvp_hdr.operation) {
        case HV_KVP_OP_GET_IP_INFO:
                return (hv_kvp_convert_utf8_ipinfo_to_utf16(umsg, host_ip_msg));

        case HV_KVP_OP_SET_IP_INFO:
        case HV_KVP_OP_SET:
        case HV_KVP_OP_DELETE:
                return (KVP_SUCCESS);

        case HV_KVP_OP_ENUMERATE:
                host_exchg_data = &hmsg->body.kvp_enum_data.data;
                key_name = umsg->body.kvp_enum_data.data.key;
                hkey_len = utf8_to_utf16((uint16_t *)host_exchg_data->key,
                    ((HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2) - 2),
                    key_name, strlen(key_name),
                    1, &utf_err);
                /* utf16 encoding */
                host_exchg_data->key_size = 2 * (hkey_len + 1);
                value = umsg->body.kvp_enum_data.data.msg_value.value;
                hvalue_len = utf8_to_utf16(
                    (uint16_t *)host_exchg_data->msg_value.value,
                    ((HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2) - 2),
                    value, strlen(value),
                    1, &utf_err);
                host_exchg_data->value_size = 2 * (hvalue_len + 1);
                host_exchg_data->value_type = HV_REG_SZ;

                if ((hkey_len < 0) || (hvalue_len < 0))
                        return (HV_KVP_E_FAIL);

                return (KVP_SUCCESS);

        case HV_KVP_OP_GET:
                host_exchg_data = &hmsg->body.kvp_get.data;
                value = umsg->body.kvp_get.data.msg_value.value;
                hvalue_len = utf8_to_utf16(
                    (uint16_t *)host_exchg_data->msg_value.value,
                    ((HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2) - 2),
                    value, strlen(value),
                    1, &utf_err);
                /* Convert value size to utf16 */
                host_exchg_data->value_size = 2 * (hvalue_len + 1);
                /* Use values by string */
                host_exchg_data->value_type = HV_REG_SZ;

                if ((hkey_len < 0) || (hvalue_len < 0))
                        return (HV_KVP_E_FAIL);

                return (KVP_SUCCESS);

        default:
                return (HV_KVP_E_FAIL);
        }
}
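
/*
 * Note on the conversions above: the host-side key_size and
 * value_size fields are stored as 2 * (length + 1), i.e. they appear
 * to be byte counts of the UTF-16 data including the terminating NUL
 * character.
 */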

/*
 * Send the response back to the host.
 */
static void
hv_kvp_respond_host(int error)
{
        struct hv_vmbus_icmsg_hdr *hv_icmsg_hdrp;

        hv_icmsg_hdrp = (struct hv_vmbus_icmsg_hdr *)
            &kvp_globals.rcv_buf[sizeof(struct hv_vmbus_pipe_hdr)];

        if (error)
                error = HV_KVP_E_FAIL;

        hv_icmsg_hdrp->status = error;
        hv_icmsg_hdrp->icflags = HV_ICMSGHDRFLAG_TRANSACTION | HV_ICMSGHDRFLAG_RESPONSE;

        error = hv_vmbus_channel_send_packet(kvp_globals.channelp,
            kvp_globals.rcv_buf,
            kvp_globals.host_msg_len, kvp_globals.host_msg_id,
            HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);

        if (error)
                hv_kvp_log_info("%s: hv_kvp_respond_host: sendpacket error:%d\n",
                    __func__, error);
}


/*
 * This is the main kvp kernel process that interacts with both the user
 * daemon and the host
 */
static void
hv_kvp_send_msg_to_daemon(void)
{
        /* Prepare kvp_msg to be sent to user */
        hv_kvp_convert_hostmsg_to_usermsg();

        /* Send the msg to user via hv_kvp_dev_daemon_read - setting sema */
        sema_post(&kvp_globals.dev_sema);

        /* We should wake up the daemon, in case it's doing poll() */
        selwakeup(&hv_kvp_selinfo);
}
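
/*
 * Request lifecycle (as implemented below and in the callback and
 * character device handlers): hv_kvp_callback() counts an incoming
 * host request and queues hv_kvp_process_request(), which pulls the
 * packet off the VMBus channel, converts it for the daemon and posts
 * dev_sema.  The daemon picks the message up via read(), replies via
 * write(), and the write handler converts the reply and sends it back
 * to the host.  If the daemon does not answer within the 5 second
 * tsleep() below, the request is failed with HV_KVP_E_FAIL and
 * req_timed_out stays set until the next request is issued.
 */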

/*
 * Function to read the kvp request buffer from host
 * and interact with daemon
 */
static void
hv_kvp_process_request(void *context)
{
        uint8_t *kvp_buf;
        hv_vmbus_channel *channel = context;
        uint32_t recvlen = 0;
        uint64_t requestid;
        struct hv_vmbus_icmsg_hdr *icmsghdrp;
        int ret = 0;
        uint64_t pending_cnt = 1;

        hv_kvp_log_info("%s: entering hv_kvp_process_request\n", __func__);
        kvp_buf = receive_buffer[HV_KVP];
        ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE,
            &recvlen, &requestid);

        /*
         * We start counting only after the daemon registers
         * and therefore there could be requests pending in
         * the VMBus that are not reflected in pending_cnt.
         * Therefore we continue reading as long as either of
         * the below conditions is true.
         */

        while ((pending_cnt > 0) || ((ret == 0) && (recvlen > 0))) {

                if ((ret == 0) && (recvlen > 0)) {

                        icmsghdrp = (struct hv_vmbus_icmsg_hdr *)
                            &kvp_buf[sizeof(struct hv_vmbus_pipe_hdr)];

                        hv_kvp_transaction_init(recvlen, channel, requestid, kvp_buf);
                        if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) {
                                hv_kvp_negotiate_version(icmsghdrp, NULL, kvp_buf);
                                hv_kvp_respond_host(ret);

                                /*
                                 * It is ok to not acquire the mutex before setting
                                 * req_in_progress here because negotiation is the
                                 * first thing that happens and hence there is no
                                 * chance of a race condition.
                                 */

                                kvp_globals.req_in_progress = false;
                                hv_kvp_log_info("%s: version negotiated\n", __func__);

                        } else {
                                if (!kvp_globals.daemon_busy) {

                                        hv_kvp_log_info("%s: issuing query to daemon\n", __func__);
                                        mtx_lock(&kvp_globals.pending_mutex);
                                        kvp_globals.req_timed_out = false;
                                        kvp_globals.daemon_busy = true;
                                        mtx_unlock(&kvp_globals.pending_mutex);

                                        hv_kvp_send_msg_to_daemon();
                                        hv_kvp_log_info("%s: waiting for daemon\n", __func__);
                                }

                                /* Wait 5 seconds for daemon to respond back */
                                tsleep(&kvp_globals, 0, "kvpworkitem", 5 * hz);
                                hv_kvp_log_info("%s: came out of wait\n", __func__);
                        }
                }

                mtx_lock(&kvp_globals.pending_mutex);

                /*
                 * Notice that once req_timed_out is set to true
                 * it will remain true until the next request is
                 * sent to the daemon.  The response from the daemon
                 * is forwarded to the host only when this flag is
                 * false.
                 */
                kvp_globals.req_timed_out = true;

                /*
                 * Cancel the request if need be.
                 */
                if (hv_kvp_req_in_progress()) {
                        hv_kvp_log_info("%s: request was still active after wait so failing\n", __func__);
                        hv_kvp_respond_host(HV_KVP_E_FAIL);
                        kvp_globals.req_in_progress = false;
                }

                /*
                 * Decrement the pending request count and take a
                 * snapshot for the loop condition above.
                 */
                if (kvp_globals.pending_reqs > 0) {
                        kvp_globals.pending_reqs = kvp_globals.pending_reqs - 1;
                }
                pending_cnt = kvp_globals.pending_reqs;

                mtx_unlock(&kvp_globals.pending_mutex);

                /*
                 * Try reading the next buffer
                 */
                recvlen = 0;
                ret = hv_vmbus_channel_recv_packet(channel, kvp_buf, 2 * PAGE_SIZE,
                    &recvlen, &requestid);
                hv_kvp_log_info("%s: read: context %p, pending_cnt %llu ret =%d, recvlen=%d\n",
                    __func__, context, (unsigned long long)pending_cnt, ret, recvlen);
        }
}


/*
 * Callback routine that gets called whenever there is a message from host
 */
void
hv_kvp_callback(void *context)
{
        uint64_t pending_cnt = 0;

        if (kvp_globals.register_done == false) {

                kvp_globals.channelp = context;
        } else {

                mtx_lock(&kvp_globals.pending_mutex);
                kvp_globals.pending_reqs = kvp_globals.pending_reqs + 1;
                pending_cnt = kvp_globals.pending_reqs;
                mtx_unlock(&kvp_globals.pending_mutex);
                if (pending_cnt == 1) {
                        hv_kvp_log_info("%s: Queuing work item\n", __func__);
                        hv_queue_work_item(
                            service_table[HV_KVP].work_queue,
                            hv_kvp_process_request,
                            context
                            );
                }
        }
}
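
/*
 * Note that only the 0 -> 1 transition of pending_reqs queues a work
 * item; subsequent callbacks merely bump the counter, since
 * hv_kvp_process_request() keeps draining the channel until
 * pending_reqs drops back to zero.
 */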

/*
 * This function is called by hv_kvp_init -
 * creates character device hv_kvp_dev and
 * allocates memory for hv_kvp_dev_buf
 */
static int
hv_kvp_dev_init(void)
{
        int error = 0;

        /* initialize semaphore */
        sema_init(&kvp_globals.dev_sema, 0, "hv_kvp device semaphore");
        /* create character device */
        error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
            &hv_kvp_dev,
            &hv_kvp_cdevsw,
            0,
            UID_ROOT,
            GID_WHEEL,
            0640,
            "hv_kvp_dev");

        if (error != 0)
                return (error);

        /*
         * Malloc with M_WAITOK flag will never fail.
         */
        hv_kvp_dev_buf = malloc(sizeof(*hv_kvp_dev_buf), M_HV_KVP_DEV_BUF, M_WAITOK |
            M_ZERO);

        return (0);
}


/*
 * This function is called by hv_kvp_deinit -
 * destroys the character device
 */
static void
hv_kvp_dev_destroy(void)
{

        if (daemon_task != NULL) {
                PROC_LOCK(daemon_task);
                kern_psignal(daemon_task, SIGKILL);
                PROC_UNLOCK(daemon_task);
        }

        destroy_dev(hv_kvp_dev);
        free(hv_kvp_dev_buf, M_HV_KVP_DEV_BUF);
        return;
}


static int
hv_kvp_dev_open(struct cdev *dev, int oflags, int devtype,
                struct thread *td)
{

        hv_kvp_log_info("%s: Opened device \"hv_kvp_device\" successfully.\n", __func__);
        if (kvp_globals.dev_accessed)
                return (-EBUSY);

        daemon_task = curproc;
        kvp_globals.dev_accessed = true;
        kvp_globals.daemon_busy = false;
        return (0);
}


static int
hv_kvp_dev_close(struct cdev *dev __unused, int fflag __unused, int devtype __unused,
                 struct thread *td __unused)
{

        hv_kvp_log_info("%s: Closing device \"hv_kvp_device\".\n", __func__);
        kvp_globals.dev_accessed = false;
        kvp_globals.register_done = false;
        return (0);
}


/*
 * hv_kvp_daemon read invokes this function;
 * acts as a send to the daemon
 */
static int
hv_kvp_dev_daemon_read(struct cdev *dev __unused, struct uio *uio, int ioflag __unused)
{
        size_t amt;
        int error = 0;

        /* Check hv_kvp daemon registration status */
        if (!kvp_globals.register_done)
                return (KVP_ERROR);

        sema_wait(&kvp_globals.dev_sema);

        memcpy(hv_kvp_dev_buf, &kvp_globals.daemon_kvp_msg, sizeof(struct hv_kvp_msg));

        amt = MIN(uio->uio_resid, uio->uio_offset >= BUFFERSIZE + 1 ? 0 :
            BUFFERSIZE + 1 - uio->uio_offset);

        if ((error = uiomove(hv_kvp_dev_buf, amt, uio)) != 0)
                hv_kvp_log_info("%s: hv_kvp uiomove read failed!\n", __func__);

        return (error);
}


/*
 * hv_kvp_daemon write invokes this function;
 * acts as a receive from the daemon
 */
static int
hv_kvp_dev_daemon_write(struct cdev *dev __unused, struct uio *uio, int ioflag __unused)
{
        size_t amt;
        int error = 0;

        uio->uio_offset = 0;

        amt = MIN(uio->uio_resid, BUFFERSIZE);
        error = uiomove(hv_kvp_dev_buf, amt, uio);

        if (error != 0)
                return (error);

        memcpy(&kvp_globals.daemon_kvp_msg, hv_kvp_dev_buf, sizeof(struct hv_kvp_msg));

        if (kvp_globals.register_done == false) {
                if (kvp_globals.daemon_kvp_msg.kvp_hdr.operation == HV_KVP_OP_REGISTER) {

                        kvp_globals.register_done = true;
                        if (kvp_globals.channelp) {

                                hv_kvp_callback(kvp_globals.channelp);
                        }
                }
                else {
                        hv_kvp_log_info("%s, KVP Registration Failed\n", __func__);
                        return (KVP_ERROR);
                }
        } else {

                mtx_lock(&kvp_globals.pending_mutex);

                if (!kvp_globals.req_timed_out) {

                        hv_kvp_convert_usermsg_to_hostmsg();
                        hv_kvp_respond_host(KVP_SUCCESS);
                        wakeup(&kvp_globals);
                        kvp_globals.req_in_progress = false;
                }

                kvp_globals.daemon_busy = false;
                mtx_unlock(&kvp_globals.pending_mutex);
        }

        return (error);
}
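
/*
 * Expected userland interaction with the device above (a sketch, not
 * a normative description): the KVP daemon opens /dev/hv_kvp_dev,
 * write()s a message whose operation is HV_KVP_OP_REGISTER, and then
 * loops doing poll() for POLLIN, read()ing a struct hv_kvp_msg,
 * processing it, and write()ing the reply.  Only one process may keep
 * the device open at a time (see dev_accessed in hv_kvp_dev_open()).
 */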

/*
 * hv_kvp_daemon poll invokes this function to check if data is available
 * for the daemon to read.
 */
static int
hv_kvp_dev_daemon_poll(struct cdev *dev __unused, int events, struct thread *td)
{
        int revents = 0;

        mtx_lock(&kvp_globals.pending_mutex);
        /*
         * We check the global flag daemon_busy to see whether data is
         * available for userland to read.  daemon_busy is set to true
         * before the driver posts data for the daemon to read, and set
         * to false after the daemon sends the response back to the
         * driver.
         */
        if (kvp_globals.daemon_busy == true)
                revents = POLLIN;
        else
                selrecord(td, &hv_kvp_selinfo);

        mtx_unlock(&kvp_globals.pending_mutex);

        return (revents);
}


/*
 * hv_kvp initialization function,
 * called from the hv_util service.
 */
int
hv_kvp_init(hv_vmbus_service *srv)
{
        int error = 0;
        hv_work_queue *work_queue = NULL;

        memset(&kvp_globals, 0, sizeof(kvp_globals));

        work_queue = hv_work_queue_create("KVP Service");
        if (work_queue == NULL) {
                hv_kvp_log_info("%s: Work queue alloc failed\n", __func__);
                error = ENOMEM;
                hv_kvp_log_error("%s: ENOMEM\n", __func__);
                goto Finish;
        }
        srv->work_queue = work_queue;

        error = hv_kvp_dev_init();
        mtx_init(&kvp_globals.pending_mutex, "hv-kvp pending mutex",
            NULL, MTX_DEF);
        kvp_globals.pending_reqs = 0;

Finish:
        return (error);
}


void
hv_kvp_deinit(void)
{
        hv_kvp_dev_destroy();
        mtx_destroy(&kvp_globals.pending_mutex);

        return;
}