/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* VMCI initialization. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "vmci.h"
#include "vmci_doorbell.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_kernel_api.h"
#include "vmci_kernel_defs.h"
#include "vmci_resource.h"

#define	LGPFX			"vmci: "
#define	VMCI_UTIL_NUM_RESOURCES	1

static vmci_id ctx_update_sub_id = VMCI_INVALID_ID;
static volatile int vm_context_id = VMCI_INVALID_ID;

/*
 *------------------------------------------------------------------------------
 *
 * vmci_util_cid_update --
 *
 *     Gets called with the new context id if updated or resumed.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_util_cid_update(vmci_id sub_id, struct vmci_event_data *event_data,
    void *client_data)
{
	struct vmci_event_payload_context *ev_payload;

	if (sub_id != ctx_update_sub_id) {
		VMCI_LOG_DEBUG(LGPFX"Invalid subscriber (ID=0x%x).\n", sub_id);
		return;
	}

	if (event_data == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Invalid event data.\n");
		return;
	}

	ev_payload = vmci_event_data_payload(event_data);
	if (ev_payload->context_id == VMCI_INVALID_ID) {
		VMCI_LOG_DEBUG(LGPFX"Invalid event data.\n");
		return;
	}

	VMCI_LOG_INFO(LGPFX"Updating context from (ID=0x%x) to (ID=0x%x) on "
	    "event (type=%d).\n", atomic_load_int(&vm_context_id),
	    ev_payload->context_id, event_data->event);
	atomic_store_int(&vm_context_id, ev_payload->context_id);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_util_init --
 *
 *     Subscribes to the context id update event.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_util_init(void)
{

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can update
	 * the internal context id when needed.
	 */
	if (vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
	    vmci_util_cid_update, NULL, &ctx_update_sub_id) < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"Failed to subscribe to event "
		    "(type=%d).\n", VMCI_EVENT_CTX_ID_UPDATE);
	}
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_util_exit --
 *
 *     Cleanup.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_util_exit(void)
{

	if (vmci_event_unsubscribe(ctx_update_sub_id) < VMCI_SUCCESS)
		VMCI_LOG_WARNING(LGPFX"Failed to unsubscribe from event "
		    "(type=%d) with subscriber (ID=0x%x).\n",
		    VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id);
}
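
/*
 * Illustrative sketch, not part of the driver: the same
 * vmci_event_subscribe()/vmci_event_unsubscribe() pair used above is the
 * general pattern for any VMCI client that wants to watch the context id
 * update event. The callback signature mirrors vmci_util_cid_update(); the
 * names example_cid_update and example_sub_id are hypothetical.
 *
 *	static vmci_id example_sub_id = VMCI_INVALID_ID;
 *
 *	static void
 *	example_cid_update(vmci_id sub_id, struct vmci_event_data *event_data,
 *	    void *client_data)
 *	{
 *		struct vmci_event_payload_context *payload;
 *
 *		payload = vmci_event_data_payload(event_data);
 *		... react to payload->context_id ...
 *	}
 *
 *	During initialization:
 *		if (vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
 *		    example_cid_update, NULL, &example_sub_id) < VMCI_SUCCESS)
 *			... log and continue without updates ...
 *
 *	During teardown:
 *		(void)vmci_event_unsubscribe(example_sub_id);
 */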

/*
 *------------------------------------------------------------------------------
 *
 * vmci_util_check_host_capabilities --
 *
 *     Verify that the host supports the hypercalls we need. If it does not,
 *     try to find fallback hypercalls and use those instead.
 *
 * Results:
 *     true if required hypercalls (or fallback hypercalls) are supported by
 *     the host, false otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static bool
vmci_util_check_host_capabilities(void)
{
	struct vmci_resources_query_msg *msg;
	struct vmci_datagram *check_msg;
	int result;
	uint32_t msg_size;

	msg_size = sizeof(struct vmci_resources_query_hdr) +
	    VMCI_UTIL_NUM_RESOURCES * sizeof(vmci_resource);
	check_msg = vmci_alloc_kernel_mem(msg_size, VMCI_MEMORY_NORMAL);

	if (check_msg == NULL) {
		VMCI_LOG_WARNING(LGPFX"Check host: Insufficient memory.\n");
		return (false);
	}

	check_msg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resources_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	result = vmci_send_datagram(check_msg);
	vmci_free_kernel_mem(check_msg, msg_size);

	/* We need the vector. There are no fallbacks. */
	return (result == 0x1);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_check_host_capabilities --
 *
 *     Tell the host which guestcalls we support and let each API check that
 *     the host supports the hypercalls it needs. If a hypercall is not
 *     supported, the API can check for a fallback hypercall, or fail the
 *     check.
 *
 * Results:
 *     true if successful, false otherwise.
 *
 * Side effects:
 *     Fallback mechanisms may be enabled in the API and vmmon.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_check_host_capabilities(void)
{
	bool result;

	result = vmci_event_check_host_capabilities();
	result &= vmci_datagram_check_host_capabilities();
	result &= vmci_util_check_host_capabilities();

	if (!result) {
		/*
		 * If it failed, then make sure this goes to the system event
		 * log.
		 */
		VMCI_LOG_WARNING(LGPFX"Host capability check failed.\n");
	} else
		VMCI_LOG_DEBUG(LGPFX"Host capability check passed.\n");

	return (result);
}
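
/*
 * Illustrative sketch, not part of the driver: the capability probe above is
 * just a VMCI_RESOURCES_QUERY datagram whose payload is a count followed by
 * an array of resource ids. Querying a second resource (the name
 * VMCI_EXAMPLE_RESOURCE is hypothetical) would only change the size
 * computation and the array fill:
 *
 *	msg_size = sizeof(struct vmci_resources_query_hdr) +
 *	    2 * sizeof(vmci_resource);
 *	...
 *	msg->num_resources = 2;
 *	msg->resources[0] = VMCI_GET_CONTEXT_ID;
 *	msg->resources[1] = VMCI_EXAMPLE_RESOURCE;
 *
 * The comparison against 0x1 above reads the reply as a per-resource bitmask
 * with bit 0 set when the single queried resource is available; that reading
 * of the return value is an assumption based on this call site.
 */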

/*
 *------------------------------------------------------------------------------
 *
 * vmci_read_datagrams_from_port --
 *
 *     Reads datagrams from the data-in port and dispatches them. We always
 *     start reading datagrams into only the first page of the datagram
 *     buffer. If the datagrams don't fit into one page, we use the maximum
 *     datagram buffer size for the remainder of the invocation. This is a
 *     simple heuristic for not penalizing small datagrams.
 *
 *     This function assumes that it has exclusive access to the data-in port
 *     for the duration of the call.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Datagram handlers may be invoked.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_read_datagrams_from_port(vmci_io_handle io_handle, vmci_io_port dg_in_port,
    uint8_t *dg_in_buffer, size_t dg_in_buffer_size)
{
	struct vmci_datagram *dg;
	size_t current_dg_in_buffer_size;
	size_t remaining_bytes;

	current_dg_in_buffer_size = PAGE_SIZE;

	ASSERT(dg_in_buffer_size >= PAGE_SIZE);

	vmci_read_port_bytes(io_handle, dg_in_port, dg_in_buffer,
	    current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;

	while (dg->dst.resource != VMCI_INVALID_ID ||
	    remaining_bytes > PAGE_SIZE) {
		size_t dg_in_size;

		/*
		 * When the input buffer spans multiple pages, a datagram can
		 * start on any page boundary in the buffer.
		 */

		if (dg->dst.resource == VMCI_INVALID_ID) {
			ASSERT(remaining_bytes > PAGE_SIZE);
			dg = (struct vmci_datagram *)ROUNDUP((uintptr_t)dg + 1,
			    PAGE_SIZE);
			ASSERT((uint8_t *)dg < dg_in_buffer +
			    current_dg_in_buffer_size);
			remaining_bytes = (size_t)(dg_in_buffer +
			    current_dg_in_buffer_size - (uint8_t *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

			/*
			 * If the remaining bytes in the datagram buffer don't
			 * contain the complete datagram, we first make sure we
			 * have enough room for it and then we read the
			 * remainder of the datagram and possibly any following
			 * datagrams.
			 */

			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {
					/*
					 * We move the partial datagram to the
					 * front and read the remainder of the
					 * datagram and possibly following
					 * calls into the following bytes.
					 */

					memmove(dg_in_buffer, dg_in_buffer +
					    current_dg_in_buffer_size -
					    remaining_bytes,
					    remaining_bytes);

					dg = (struct vmci_datagram *)
					    dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
					    dg_in_buffer_size;

				vmci_read_port_bytes(io_handle, dg_in_port,
				    dg_in_buffer + remaining_bytes,
				    current_dg_in_buffer_size -
				    remaining_bytes);
			}

			/*
			 * We special case event datagrams from the
			 * hypervisor.
			 */
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER)
				result = vmci_event_dispatch(dg);
			else
				result =
				    vmci_datagram_invoke_guest_handler(dg);
			if (result < VMCI_SUCCESS)
				VMCI_LOG_DEBUG(LGPFX"Datagram with resource"
				    " (ID=0x%x) failed (err=%d).\n",
				    dg->dst.resource, result);

			/* On to the next datagram. */
			dg = (struct vmci_datagram *)((uint8_t *)dg +
			    dg_in_size);
		} else {
			size_t bytes_to_skip;

			/*
			 * Datagram doesn't fit in datagram buffer of maximal
			 * size. We drop it.
			 */

			VMCI_LOG_DEBUG(LGPFX"Failed to receive datagram "
			    "(size=%zu bytes).\n", dg_in_size);

			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;
			for (;;) {
				vmci_read_port_bytes(io_handle, dg_in_port,
				    dg_in_buffer, current_dg_in_buffer_size);
				if (bytes_to_skip <=
				    current_dg_in_buffer_size)
					break;
				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
			    bytes_to_skip);
		}

		remaining_bytes = (size_t)(dg_in_buffer +
		    current_dg_in_buffer_size - (uint8_t *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			/* Get the next batch of datagrams. */

			vmci_read_port_bytes(io_handle, dg_in_port,
			    dg_in_buffer, current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}
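
/*
 * Illustrative sketch, not part of the driver: the cursor arithmetic above
 * relies on VMCI_DG_SIZE_ALIGNED() returning the header-plus-payload size of
 * a datagram rounded up to the alignment the device uses when packing
 * datagrams into the buffer. As a worked example, assuming an 8-byte
 * alignment and a 24-byte header (both assumptions here; the real values
 * come from the VMCI headers), a datagram with a 100-byte payload advances
 * the cursor by 128 bytes:
 *
 *	raw size     = VMCI_DG_HEADERSIZE + 100 = 124
 *	aligned size = roundup2(124, 8)         = 128
 *	dg           = (struct vmci_datagram *)((uint8_t *)dg + 128);
 *
 * which is why remaining_bytes can drop below VMCI_DG_HEADERSIZE and trigger
 * the refill at the bottom of the loop even when no datagram was truncated.
 */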

/*
 *------------------------------------------------------------------------------
 *
 * vmci_get_context_id --
 *
 *     Returns the current context ID. Note that since this is accessed only
 *     from code running in the host, this always returns the host context ID.
 *
 * Results:
 *     Context ID.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

vmci_id
vmci_get_context_id(void)
{

	if (atomic_load_int(&vm_context_id) == VMCI_INVALID_ID) {
		uint32_t result;
		struct vmci_datagram get_cid_msg;

		get_cid_msg.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
		    VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		result = vmci_send_datagram(&get_cid_msg);
		atomic_store_int(&vm_context_id, result);
	}
	return (atomic_load_int(&vm_context_id));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_components_init --
 *
 *     Initializes VMCI components and registers core hypercalls.
 *
 * Results:
 *     VMCI_SUCCESS if successful, appropriate error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_components_init(void)
{
	int result;

	result = vmci_resource_init();
	if (result < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"Failed to initialize vmci_resource "
		    "(result=%d).\n", result);
		goto error_exit;
	}

	result = vmci_event_init();
	if (result < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"Failed to initialize vmci_event "
		    "(result=%d).\n", result);
		goto resource_exit;
	}

	result = vmci_doorbell_init();
	if (result < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"Failed to initialize vmci_doorbell "
		    "(result=%d).\n", result);
		goto event_exit;
	}

	VMCI_LOG_DEBUG(LGPFX"components initialized.\n");
	return (VMCI_SUCCESS);

event_exit:
	vmci_event_exit();

resource_exit:
	vmci_resource_exit();

error_exit:
	return (result);
}
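
/*
 * Illustrative sketch, not part of the driver: adding a hypothetical fourth
 * component to vmci_components_init() would extend the goto-based unwind
 * chain in the same style (vmci_example_init()/vmci_example_exit() and the
 * doorbell_exit label are invented for the example):
 *
 *	result = vmci_example_init();
 *	if (result < VMCI_SUCCESS) {
 *		VMCI_LOG_WARNING(LGPFX"Failed to initialize vmci_example "
 *		    "(result=%d).\n", result);
 *		goto doorbell_exit;
 *	}
 *	...
 *
 *	doorbell_exit:
 *		vmci_doorbell_exit();
 *
 *	event_exit:
 *		vmci_event_exit();
 *
 * so that every component that did initialize is torn down in reverse order
 * when a later one fails. vmci_components_cleanup() below mirrors the same
 * reverse order for the normal shutdown path.
 */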

/*
 *------------------------------------------------------------------------------
 *
 * vmci_components_cleanup --
 *
 *     Cleans up VMCI components.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_components_cleanup(void)
{

	vmci_doorbell_exit();
	vmci_event_exit();
	vmci_resource_exit();
}
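
/*
 * Illustrative sketch, not part of the driver: how the entry points in this
 * file would typically pair up in a device attach/detach path. The wrapper
 * names example_attach()/example_detach() are hypothetical, the error code
 * chosen for the capability failure is an assumption, and the ordering is
 * inferred from the dependencies visible in this file (event subscription
 * and capability checks need the components initialized first).
 *
 *	static int
 *	example_attach(void)
 *	{
 *		int error;
 *
 *		error = vmci_components_init();
 *		if (error < VMCI_SUCCESS)
 *			return (error);
 *
 *		vmci_util_init();
 *
 *		if (!vmci_check_host_capabilities()) {
 *			vmci_util_exit();
 *			vmci_components_cleanup();
 *			return (VMCI_ERROR_UNAVAILABLE);
 *		}
 *		return (VMCI_SUCCESS);
 *	}
 *
 *	static void
 *	example_detach(void)
 *	{
 *		vmci_util_exit();
 *		vmci_components_cleanup();
 *	}
 */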