/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* VMCI initialization. */

#include <sys/cdefs.h>
#include "vmci.h"
#include "vmci_doorbell.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_kernel_api.h"
#include "vmci_kernel_defs.h"
#include "vmci_resource.h"

#define LGPFX			"vmci: "
#define VMCI_UTIL_NUM_RESOURCES	1

static vmci_id ctx_update_sub_id = VMCI_INVALID_ID;
static volatile int vm_context_id = VMCI_INVALID_ID;
/*
 *------------------------------------------------------------------------------
 *
 * vmci_util_cid_update --
 *
 *     Called with the new context id when it is updated or the VM is resumed.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Updates the cached vm_context_id.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_util_cid_update(vmci_id sub_id, struct vmci_event_data *event_data,
    void *client_data)
{
	struct vmci_event_payload_context *ev_payload;

	if (sub_id != ctx_update_sub_id) {
		VMCI_LOG_DEBUG(LGPFX"Invalid subscriber (ID=0x%x).\n", sub_id);
		return;
	}

	/* Validate event_data before dereferencing the payload within it. */
	if (event_data == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Invalid event data.\n");
		return;
	}

	ev_payload = vmci_event_data_payload(event_data);
	if (ev_payload->context_id == VMCI_INVALID_ID) {
		VMCI_LOG_DEBUG(LGPFX"Invalid event data.\n");
		return;
	}

	VMCI_LOG_INFO(LGPFX"Updating context from (ID=0x%x) to (ID=0x%x) on "
	    "event (type=%d).\n", atomic_load_int(&vm_context_id),
	    ev_payload->context_id, event_data->event);
	atomic_store_int(&vm_context_id, ev_payload->context_id);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_util_init --
 *
 *     Subscribes to the context id update event.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_util_init(void)
{

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can update
	 * the internal context id when needed.
	 */
	if (vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
	    vmci_util_cid_update, NULL, &ctx_update_sub_id) < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"Failed to subscribe to event "
		    "(type=%d).\n", VMCI_EVENT_CTX_ID_UPDATE);
	}
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_util_exit --
 *
 *     Unsubscribes from the context id update event.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_util_exit(void)
{

	if (vmci_event_unsubscribe(ctx_update_sub_id) < VMCI_SUCCESS)
		VMCI_LOG_WARNING(LGPFX"Failed to unsubscribe from event "
		    "(type=%d) with subscriber (ID=0x%x).\n",
		    VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id);
}
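
#if 0
/*
 * Illustrative sketch only, not part of the driver: any VMCI client can
 * pair vmci_event_subscribe() and vmci_event_unsubscribe() the way
 * vmci_util_init() and vmci_util_exit() do above. The callback and
 * subscription id below are hypothetical names, not existing symbols.
 */
static vmci_id example_sub_id = VMCI_INVALID_ID;

static void
example_event_cb(vmci_id sub_id, struct vmci_event_data *event_data,
    void *client_data)
{

	/* React to the context id update here. */
}

static int
example_subscribe(void)
{

	return (vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
	    example_event_cb, NULL, &example_sub_id));
}

static void
example_unsubscribe(void)
{

	(void)vmci_event_unsubscribe(example_sub_id);
}
#endif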

/*
 *------------------------------------------------------------------------------
 *
 * vmci_util_check_host_capabilities --
 *
 *     Verify that the host supports the hypercalls we need. If it does not,
 *     try to find fallback hypercalls and use those instead.
 *
 * Results:
 *     true if required hypercalls (or fallback hypercalls) are supported by
 *     the host, false otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static bool
vmci_util_check_host_capabilities(void)
{
	struct vmci_resources_query_msg *msg;
	struct vmci_datagram *check_msg;
	int result;
	uint32_t msg_size;

	msg_size = sizeof(struct vmci_resources_query_hdr) +
	    VMCI_UTIL_NUM_RESOURCES * sizeof(vmci_resource);
	check_msg = vmci_alloc_kernel_mem(msg_size, VMCI_MEMORY_NORMAL);

	if (check_msg == NULL) {
		VMCI_LOG_WARNING(LGPFX"Check host: Insufficient memory.\n");
		return (false);
	}

	check_msg->dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resources_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	result = vmci_send_datagram(check_msg);
	vmci_free_kernel_mem(check_msg, msg_size);

	/* We need the vector. There are no fallbacks. */
	return (result == 0x1);
}
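
/*
 * For reference, the resources-query datagram built above has this layout
 * (a sketch; the exact sizes come from the shared VMCI headers):
 *
 *	+---------------------------------+ <- check_msg
 *	| struct vmci_datagram header     |    VMCI_DG_HEADERSIZE bytes
 *	|   (dst, src, payload_size)      |
 *	+---------------------------------+ <- VMCI_DG_PAYLOAD(check_msg)
 *	| num_resources = 1               |    payload_size =
 *	| resources[0] =                  |    msg_size - VMCI_DG_HEADERSIZE
 *	|   VMCI_GET_CONTEXT_ID           |
 *	+---------------------------------+
 */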

/*
 *------------------------------------------------------------------------------
 *
 * vmci_check_host_capabilities --
 *
 *     Tells the host which guestcalls we support and lets each API check
 *     that the host supports the hypercalls it needs. If a hypercall is not
 *     supported, the API can check for a fallback hypercall, or fail the
 *     check.
 *
 * Results:
 *     true if successful, false otherwise.
 *
 * Side effects:
 *     Fallback mechanisms may be enabled in the API and vmmon.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_check_host_capabilities(void)
{
	bool result;

	result = vmci_event_check_host_capabilities();
	result &= vmci_datagram_check_host_capabilities();
	result &= vmci_util_check_host_capabilities();

	if (!result) {
		/*
		 * If it failed, then make sure this goes to the system event
		 * log.
		 */
		VMCI_LOG_WARNING(LGPFX"Host capability check failed.\n");
	} else {
		VMCI_LOG_DEBUG(LGPFX"Host capability check passed.\n");
	}

	return (result);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_read_datagrams_from_port --
 *
 *     Reads datagrams from the data-in port and dispatches them. We always
 *     start reading datagrams into only the first page of the datagram
 *     buffer. If the datagrams don't fit into one page, we use the maximum
 *     datagram buffer size for the remainder of the invocation. This is a
 *     simple heuristic for not penalizing small datagrams.
 *
 *     This function assumes that it has exclusive access to the data-in port
 *     for the duration of the call.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Datagram handlers may be invoked.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_read_datagrams_from_port(vmci_io_handle io_handle, vmci_io_port dg_in_port,
    uint8_t *dg_in_buffer, size_t dg_in_buffer_size)
{
	struct vmci_datagram *dg;
	size_t current_dg_in_buffer_size;
	size_t remaining_bytes;

	current_dg_in_buffer_size = PAGE_SIZE;

	ASSERT(dg_in_buffer_size >= PAGE_SIZE);

	vmci_read_port_bytes(io_handle, dg_in_port, dg_in_buffer,
	    current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;

	while (dg->dst.resource != VMCI_INVALID_ID ||
	    remaining_bytes > PAGE_SIZE) {
		size_t dg_in_size;

		/*
		 * When the input buffer spans multiple pages, a datagram can
		 * start on any page boundary in the buffer.
		 */
		if (dg->dst.resource == VMCI_INVALID_ID) {
			ASSERT(remaining_bytes > PAGE_SIZE);
			dg = (struct vmci_datagram *)ROUNDUP((uintptr_t)dg + 1,
			    PAGE_SIZE);
			ASSERT((uint8_t *)dg < dg_in_buffer +
			    current_dg_in_buffer_size);
			remaining_bytes = (size_t)(dg_in_buffer +
			    current_dg_in_buffer_size - (uint8_t *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

			/*
			 * If the remaining bytes in the datagram buffer don't
			 * contain the complete datagram, we first make sure we
			 * have enough room for it and then we read the
			 * remainder of the datagram and possibly any following
			 * datagrams.
			 */
			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {
					/*
					 * We move the partial datagram to the
					 * front and read the remainder of the
					 * datagram and possibly any following
					 * datagrams into the following bytes.
					 */
					memmove(dg_in_buffer, dg_in_buffer +
					    current_dg_in_buffer_size -
					    remaining_bytes,
					    remaining_bytes);

					dg = (struct vmci_datagram *)
					    dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
					    dg_in_buffer_size;

				vmci_read_port_bytes(io_handle, dg_in_port,
				    dg_in_buffer + remaining_bytes,
				    current_dg_in_buffer_size -
				    remaining_bytes);
			}

			/*
			 * We special case event datagrams from the
			 * hypervisor.
			 */
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER)
				result = vmci_event_dispatch(dg);
			else
				result =
				    vmci_datagram_invoke_guest_handler(dg);
			if (result < VMCI_SUCCESS)
				VMCI_LOG_DEBUG(LGPFX"Datagram with resource"
				    " (ID=0x%x) failed (err=%d).\n",
				    dg->dst.resource, result);

			/* On to the next datagram. */
			dg = (struct vmci_datagram *)((uint8_t *)dg +
			    dg_in_size);
		} else {
			size_t bytes_to_skip;

			/*
			 * Datagram doesn't fit in datagram buffer of maximal
			 * size. We drop it.
			 */
			VMCI_LOG_DEBUG(LGPFX"Failed to receive datagram "
			    "(size=%zu bytes).\n", dg_in_size);

			/*
			 * Drain the remainder of the oversized datagram from
			 * the port, one buffer at a time.
			 */
			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;
			for (;;) {
				vmci_read_port_bytes(io_handle, dg_in_port,
				    dg_in_buffer, current_dg_in_buffer_size);
				if (bytes_to_skip <=
				    current_dg_in_buffer_size)
					break;
				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
			    bytes_to_skip);
		}

		remaining_bytes = (size_t)(dg_in_buffer +
		    current_dg_in_buffer_size - (uint8_t *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			/* Get the next batch of datagrams. */
			vmci_read_port_bytes(io_handle, dg_in_port,
			    dg_in_buffer, current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}
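
/*
 * A worked example of the buffer-growth heuristic above, with made-up
 * numbers: suppose PAGE_SIZE is 4 KiB and dg_in_buffer_size is 16 pages.
 * The first vmci_read_port_bytes() pulls in a single page. If a datagram
 * header near the end of that page reports an aligned size larger than
 * the bytes remaining in the page, the partial datagram is moved to the
 * front of the buffer, current_dg_in_buffer_size grows to the full 16
 * pages, and every later read in this invocation uses the large buffer.
 */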

/*
 *------------------------------------------------------------------------------
 *
 * vmci_get_context_id --
 *
 *     Returns the current context ID. If no ID is cached yet, it is obtained
 *     from the hypervisor with a VMCI_GET_CONTEXT_ID datagram; context id
 *     update events keep the cached value current across resume.
 *
 * Results:
 *     Context ID.
 *
 * Side effects:
 *     May issue a hypercall and cache the result.
 *
 *------------------------------------------------------------------------------
 */

vmci_id
vmci_get_context_id(void)
{
	if (atomic_load_int(&vm_context_id) == VMCI_INVALID_ID) {
		uint32_t result;
		struct vmci_datagram get_cid_msg;

		get_cid_msg.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
		    VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		result = vmci_send_datagram(&get_cid_msg);
		atomic_store_int(&vm_context_id, result);
	}
	return (atomic_load_int(&vm_context_id));
}
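
#if 0
/*
 * Illustrative sketch only: after the first successful query the id is
 * served from the cache, so callers can use vmci_get_context_id() freely
 * in datagram paths. example_is_own_datagram() is a hypothetical helper,
 * not an existing symbol.
 */
static bool
example_is_own_datagram(struct vmci_datagram *dg)
{

	return (dg->src.context == vmci_get_context_id());
}
#endif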

/*
 *------------------------------------------------------------------------------
 *
 * vmci_components_init --
 *
 *     Initializes VMCI components and registers core hypercalls.
 *
 * Results:
 *     VMCI_SUCCESS if successful, appropriate error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_components_init(void)
{
	int result;

	result = vmci_resource_init();
	if (result < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"Failed to initialize vmci_resource "
		    "(result=%d).\n", result);
		goto error_exit;
	}

	result = vmci_event_init();
	if (result < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"Failed to initialize vmci_event "
		    "(result=%d).\n", result);
		goto resource_exit;
	}

	result = vmci_doorbell_init();
	if (result < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"Failed to initialize vmci_doorbell "
		    "(result=%d).\n", result);
		goto event_exit;
	}

	VMCI_LOG_DEBUG(LGPFX"components initialized.\n");
	return (VMCI_SUCCESS);

event_exit:
	vmci_event_exit();

resource_exit:
	vmci_resource_exit();

error_exit:
	return (result);
}
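
#if 0
/*
 * Illustrative sketch only: a plausible bring-up/teardown order for the
 * entry points in this file, loosely modeled on a driver attach path.
 * example_attach() is a hypothetical name, and the use of
 * VMCI_ERROR_UNAVAILABLE as the failure code is an assumption; the real
 * attach logic lives in the bus glue, not here.
 */
static int
example_attach(void)
{
	int result;

	result = vmci_components_init();
	if (result < VMCI_SUCCESS)
		return (result);

	if (!vmci_check_host_capabilities()) {
		vmci_components_cleanup();
		return (VMCI_ERROR_UNAVAILABLE);
	}

	vmci_util_init();
	return (VMCI_SUCCESS);
}
#endif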

/*
 *------------------------------------------------------------------------------
 *
 * vmci_components_cleanup --
 *
 *     Cleans up VMCI components.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_components_cleanup(void)
{

	/* Tear down in the reverse order of vmci_components_init(). */
	vmci_doorbell_exit();
	vmci_event_exit();
	vmci_resource_exit();
}