/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* This file implements VMCI Event code. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "vmci.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_kernel_api.h"
#include "vmci_kernel_defs.h"
#include "vmci_kernel_if.h"

#define LGPFX		"vmci_event: "
#define EVENT_MAGIC	0xEABE0000

struct vmci_subscription {
	vmci_id		id;
	int		ref_count;
	bool		run_delayed;
	vmci_event	destroy_event;
	vmci_event_type	event;
	vmci_event_cb	callback;
	void		*callback_data;
	vmci_list_item(vmci_subscription) subscriber_list_item;
};

static struct	vmci_subscription *vmci_event_find(vmci_id sub_id);
static int	vmci_event_deliver(struct vmci_event_msg *event_msg);
static int	vmci_event_register_subscription(struct vmci_subscription *sub,
		    vmci_event_type event, uint32_t flags,
		    vmci_event_cb callback, void *callback_data);
static struct	vmci_subscription *vmci_event_unregister_subscription(
		    vmci_id sub_id);

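/*
 * Subscriptions are kept in one list per event type; every list, and every
 * subscription's ref_count, is guarded by subscriber_lock.
 */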
static vmci_list(vmci_subscription) subscriber_array[VMCI_EVENT_MAX];
static vmci_lock subscriber_lock;

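/*
 * Snapshot of a subscription and its event payload, handed to
 * vmci_event_delayed_dispatch_cb() by vmci_event_deliver().
 */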
struct vmci_delayed_event_info {
	struct vmci_subscription *sub;
	uint8_t event_payload[sizeof(struct vmci_event_data_max)];
};

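/*
 * Transient reference holder used by vmci_event_deliver() to batch the
 * non-delayed subscribers whose callbacks are invoked after the lock is
 * dropped.
 */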
struct vmci_event_ref {
	struct vmci_subscription	*sub;
	vmci_list_item(vmci_event_ref)	list_item;
};

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_init --
 *
 *     General init code.
 *
 * Results:
 *     VMCI_SUCCESS on success, appropriate error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		vmci_list_init(&subscriber_array[i]);

	return (vmci_init_lock(&subscriber_lock, "VMCI Event subscriber lock"));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_exit --
 *
 *     General exit code.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_event_exit(void)
{
	struct vmci_subscription *iter, *iter_2;
	vmci_event_type e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		vmci_list_scan_safe(iter, &subscriber_array[e],
		    subscriber_list_item, iter_2) {
			/*
			 * We should never get here because all events should
			 * have been unregistered before we try to unload the
			 * driver module. Also, delayed callbacks could still
			 * be firing, so this cleanup would not be safe. Still,
			 * it is better to free the memory than not ... so we
			 * leave this code in just in case....
			 */
			ASSERT(false);

			vmci_free_kernel_mem(iter, sizeof(*iter));
		}
	}
	vmci_cleanup_lock(&subscriber_lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_sync --
 *
 *     Use this as a synchronization point when setting globals, for example,
 *     during device shutdown.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_event_sync(void)
{

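	/*
	 * Taking and immediately dropping subscriber_lock acts as a barrier:
	 * once it returns, any subscriber-list update that was in flight has
	 * completed.
	 */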
	vmci_grab_lock_bh(&subscriber_lock);
	vmci_release_lock_bh(&subscriber_lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_check_host_capabilities --
 *
 *     Verify that the host supports the hypercalls we need. If it does not,
 *     try to find fallback hypercalls and use those instead.
 *
 * Results:
 *     true if required hypercalls (or fallback hypercalls) are
 *     supported by the host, false otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_event_check_host_capabilities(void)
{

	/* vmci_event does not require any hypercalls. */
	return (true);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_get --
 *
 *     Gets a reference to the given struct vmci_subscription.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_get(struct vmci_subscription *entry)
{

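	/*
	 * Both vmci_event_get() and vmci_event_release() are called with
	 * subscriber_lock held, so ref_count needs no atomic operations.
	 */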
	ASSERT(entry);

	entry->ref_count++;
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_release --
 *
 *     Releases the given struct vmci_subscription.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Fires the destroy event if the reference count has gone to zero.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_release(struct vmci_subscription *entry)
{

	ASSERT(entry);
	ASSERT(entry->ref_count > 0);

	entry->ref_count--;
	if (entry->ref_count == 0)
		vmci_signal_event(&entry->destroy_event);
}

/*
 *------------------------------------------------------------------------------
 *
 * event_release_cb --
 *
 *     Callback to release the event entry reference. It is called by the
 *     vmci_wait_on_event function before it blocks.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static int
event_release_cb(void *client_data)
{
	struct vmci_subscription *sub = (struct vmci_subscription *)client_data;

	ASSERT(sub);

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_event_release(sub);
	vmci_release_lock_bh(&subscriber_lock);

	return (0);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_find --
 *
 *     Find entry. Assumes lock is held.
 *
 * Results:
 *     Entry if found, NULL if not.
 *
 * Side effects:
 *     Increments the struct vmci_subscription refcount if an entry is found.
 *
 *------------------------------------------------------------------------------
 */

static struct vmci_subscription *
vmci_event_find(vmci_id sub_id)
{
	struct vmci_subscription *iter;
	vmci_event_type e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		vmci_list_scan(iter, &subscriber_array[e],
		    subscriber_list_item) {
			if (iter->id == sub_id) {
				vmci_event_get(iter);
				return (iter);
			}
		}
	}
	return (NULL);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_delayed_dispatch_cb --
 *
 *     Calls the specified callback in a delayed context.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_delayed_dispatch_cb(void *data)
{
	struct vmci_delayed_event_info *event_info;
	struct vmci_subscription *sub;
	struct vmci_event_data *ed;

	event_info = (struct vmci_delayed_event_info *)data;

	ASSERT(event_info);
	ASSERT(event_info->sub);

	sub = event_info->sub;
	ed = (struct vmci_event_data *)event_info->event_payload;

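	/*
	 * Run the subscriber's callback in thread context, then drop the
	 * reference taken for us by vmci_event_deliver().
	 */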
	sub->callback(sub->id, ed, sub->callback_data);

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_event_release(sub);
	vmci_release_lock_bh(&subscriber_lock);

	vmci_free_kernel_mem(event_info, sizeof(*event_info));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_deliver --
 *
 *     Actually delivers the events to the subscribers.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     The callback function for each subscriber is invoked.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *iter;
	int err = VMCI_SUCCESS;

	vmci_list(vmci_event_ref) no_delay_list;
	vmci_list_init(&no_delay_list);

	ASSERT(event_msg);

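	/*
	 * Two delivery paths: delayed subscribers get a private copy of the
	 * payload dispatched to a worker thread, while the rest are collected
	 * on no_delay_list and called back directly once the lock is dropped.
	 */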
	vmci_grab_lock_bh(&subscriber_lock);
	vmci_list_scan(iter, &subscriber_array[event_msg->event_data.event],
	    subscriber_list_item) {
		if (iter->run_delayed) {
			struct vmci_delayed_event_info *event_info;
			if ((event_info =
			    vmci_alloc_kernel_mem(sizeof(*event_info),
			    VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;
				goto out;
			}

			vmci_event_get(iter);

			memset(event_info, 0, sizeof(*event_info));
			memcpy(event_info->event_payload,
			    VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			event_info->sub = iter;
			err =
			    vmci_schedule_delayed_work(
			    vmci_event_delayed_dispatch_cb, event_info);
			if (err != VMCI_SUCCESS) {
				vmci_event_release(iter);
				vmci_free_kernel_mem(
				    event_info, sizeof(*event_info));
				goto out;
			}

		} else {
			struct vmci_event_ref *event_ref;

			/*
			 * We construct a local list of subscribers and release
			 * subscriber_lock before invoking the callbacks. This
			 * is similar to delayed callbacks, but callbacks are
			 * invoked right away here.
			 */
			if ((event_ref = vmci_alloc_kernel_mem(
			    sizeof(*event_ref), VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;
				goto out;
			}

			vmci_event_get(iter);
			event_ref->sub = iter;
			vmci_list_insert(&no_delay_list, event_ref, list_item);
		}
	}

out:
	vmci_release_lock_bh(&subscriber_lock);

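	/*
	 * With the lock dropped, fire the non-delayed callbacks that were
	 * collected above.
	 */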
	if (!vmci_list_empty(&no_delay_list)) {
		struct vmci_event_data *ed;
		struct vmci_event_ref *iter;
		struct vmci_event_ref *iter_2;

		vmci_list_scan_safe(iter, &no_delay_list, list_item, iter_2) {
			struct vmci_subscription *cur;
			uint8_t event_payload[sizeof(
			    struct vmci_event_data_max)];

			cur = iter->sub;

			/*
			 * We set event data before each callback to ensure
			 * isolation.
			 */
			memset(event_payload, 0, sizeof(event_payload));
			memcpy(event_payload, VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			ed = (struct vmci_event_data *)event_payload;
			cur->callback(cur->id, ed, cur->callback_data);

			vmci_grab_lock_bh(&subscriber_lock);
			vmci_event_release(cur);
			vmci_release_lock_bh(&subscriber_lock);
			vmci_free_kernel_mem(iter, sizeof(*iter));
		}
	}

	return (err);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_dispatch --
 *
 *     Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 *     subscribers for the given event.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	ASSERT(msg &&
	    msg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
	    msg->dst.resource == VMCI_EVENT_HANDLER);

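	/*
	 * Reject payloads too small to carry an event type or larger than
	 * the biggest event payload we know about.
	 */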
	if (msg->payload_size < sizeof(vmci_event_type) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return (VMCI_ERROR_INVALID_ARGS);

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return (VMCI_ERROR_EVENT_UNKNOWN);

	vmci_event_deliver(event_msg);

	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_register_subscription --
 *
 *     Initialize and add subscription to subscriber list.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_event_register_subscription(struct vmci_subscription *sub,
    vmci_event_type event, uint32_t flags, vmci_event_cb callback,
    void *callback_data)
{
#define VMCI_EVENT_MAX_ATTEMPTS	10
	static vmci_id subscription_id = 0;
	int result;
	uint32_t attempts = 0;
	bool success;

	ASSERT(sub);

	if (!VMCI_EVENT_VALID(event) || callback == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Failed to subscribe to event"
		    " (type=%d) (callback=%p) (data=%p).\n",
		    event, callback, callback_data);
		return (VMCI_ERROR_INVALID_ARGS);
	}

	if (!vmci_can_schedule_delayed_work()) {
		/*
		 * If the platform doesn't support delayed work callbacks then
		 * don't allow registration for them.
		 */
		if (flags & VMCI_FLAG_EVENT_DELAYED_CB)
			return (VMCI_ERROR_INVALID_ARGS);
		sub->run_delayed = false;
	} else {
		/*
		 * The platform supports delayed work callbacks. Honor the
		 * requested flags.
		 */
		sub->run_delayed = (flags & VMCI_FLAG_EVENT_DELAYED_CB) ?
		    true : false;
	}

	sub->ref_count = 1;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;

	vmci_grab_lock_bh(&subscriber_lock);

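	/*
	 * subscription_id is a static counter that can wrap after enough
	 * registrations, so each candidate ID is checked against the live
	 * subscriptions before it is handed out.
	 */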
	for (success = false, attempts = 0;
	    success == false && attempts < VMCI_EVENT_MAX_ATTEMPTS;
	    attempts++) {
		struct vmci_subscription *existing_sub = NULL;

		/*
		 * We try to get an id a couple of times before claiming we are
		 * out of resources.
		 */
		sub->id = ++subscription_id;

		/* Test for duplicate id. */
		existing_sub = vmci_event_find(sub->id);
		if (existing_sub == NULL) {
			/* We succeeded if we didn't find a duplicate. */
			success = true;
		} else
			vmci_event_release(existing_sub);
	}

	if (success) {
		vmci_create_event(&sub->destroy_event);
		vmci_list_insert(&subscriber_array[event], sub,
		    subscriber_list_item);
		result = VMCI_SUCCESS;
	} else
		result = VMCI_ERROR_NO_RESOURCES;

	vmci_release_lock_bh(&subscriber_lock);
	return (result);
#undef VMCI_EVENT_MAX_ATTEMPTS
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_unregister_subscription --
 *
 *     Remove subscription from subscriber list.
 *
 * Results:
 *     struct vmci_subscription when found, NULL otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static struct vmci_subscription *
vmci_event_unregister_subscription(vmci_id sub_id)
{
	struct vmci_subscription *s;

	if (!vmci_initialized_lock(&subscriber_lock))
		return (NULL);

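	/*
	 * Drop the reference vmci_event_find() takes and unlink the entry;
	 * then wait until the remaining holders (including any in-flight
	 * callbacks) release theirs. event_release_cb() drops the initial
	 * registration reference before vmci_wait_on_event() blocks.
	 */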
	vmci_grab_lock_bh(&subscriber_lock);
	s = vmci_event_find(sub_id);
	if (s != NULL) {
		vmci_event_release(s);
		vmci_list_remove(s, subscriber_list_item);
	}
	vmci_release_lock_bh(&subscriber_lock);

	if (s != NULL) {
		vmci_wait_on_event(&s->destroy_event, event_release_cb, s);
		vmci_destroy_event(&s->destroy_event);
	}

	return (s);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_subscribe --
 *
 *     Subscribe to given event. The callback specified can be fired in
 *     different contexts depending on what flag is specified while registering.
 *     If flags contains VMCI_FLAG_EVENT_NONE then the callback is fired with
 *     the subscriber lock held (and BH context on the guest). If flags contains
 *     VMCI_FLAG_EVENT_DELAYED_CB then the callback is fired with no locks held
 *     in thread context. This is useful because other vmci_event functions can
 *     be called, but it also increases the chances that an event will be
 *     dropped.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_subscribe(vmci_event_type event, vmci_event_cb callback,
    void *callback_data, vmci_id *subscription_id)
{
	int retval;
	uint32_t flags = VMCI_FLAG_EVENT_NONE;
	struct vmci_subscription *s = NULL;

	if (subscription_id == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Invalid subscription (NULL).\n");
		return (VMCI_ERROR_INVALID_ARGS);
	}

	s = vmci_alloc_kernel_mem(sizeof(*s), VMCI_MEMORY_NORMAL);
	if (s == NULL)
		return (VMCI_ERROR_NO_MEM);

	retval = vmci_event_register_subscription(s, event, flags,
	    callback, callback_data);
	if (retval < VMCI_SUCCESS) {
		vmci_free_kernel_mem(s, sizeof(*s));
		return (retval);
	}

	*subscription_id = s->id;
	return (retval);
}

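/*
 * Illustrative usage only (a sketch, not code in this driver): a client
 * could pair vmci_event_subscribe() with vmci_event_unsubscribe() as below.
 * The callback and variable names are placeholders, and the event type is
 * assumed to be one of the defined VMCI event types, such as
 * VMCI_EVENT_QP_RESUMED. With the default VMCI_FLAG_EVENT_NONE behavior the
 * callback runs with the subscriber lock held, so it must not block.
 *
 *	static void
 *	example_event_cb(vmci_id sub_id, struct vmci_event_data *ed,
 *	    void *client_data)
 *	{
 *		... react to the event; must not block ...
 *	}
 *
 *	vmci_id example_sub_id;
 *	int rc;
 *
 *	rc = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED, example_event_cb,
 *	    NULL, &example_sub_id);
 *	if (rc < VMCI_SUCCESS)
 *		return (rc);
 *	...
 *	(void)vmci_event_unsubscribe(example_sub_id);
 */
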
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_unsubscribe --
 *
 *     Unsubscribe from given event. Removes the subscription from the list
 *     and frees it.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_unsubscribe(vmci_id sub_id)
{
	struct vmci_subscription *s;

	/*
	 * Return subscription. At this point we know no one else is accessing
	 * the subscription, so we can free it.
	 */
	s = vmci_event_unregister_subscription(sub_id);
	if (s == NULL)
		return (VMCI_ERROR_NOT_FOUND);
	vmci_free_kernel_mem(s, sizeof(*s));

	return (VMCI_SUCCESS);
}
699