/* xref: /freebsd/sys/dev/vmware/vmci/vmci_event.c (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c) */
/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* This file implements VMCI Event code. */

#include <sys/cdefs.h>
#include "vmci.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_kernel_api.h"
#include "vmci_kernel_defs.h"
#include "vmci_kernel_if.h"

#define LGPFX		"vmci_event: "
#define EVENT_MAGIC	0xEABE0000

struct vmci_subscription {
	vmci_id		id;
	int		ref_count;
	bool		run_delayed;
	vmci_event	destroy_event;
	vmci_event_type	event;
	vmci_event_cb	callback;
	void		*callback_data;
	vmci_list_item(vmci_subscription) subscriber_list_item;
};

static struct	vmci_subscription *vmci_event_find(vmci_id sub_id);
static int	vmci_event_deliver(struct vmci_event_msg *event_msg);
static int	vmci_event_register_subscription(struct vmci_subscription *sub,
		    vmci_event_type event, uint32_t flags,
		    vmci_event_cb callback, void *callback_data);
static struct	vmci_subscription *vmci_event_unregister_subscription(
		    vmci_id sub_id);

static vmci_list(vmci_subscription) subscriber_array[VMCI_EVENT_MAX];
static vmci_lock subscriber_lock;

struct vmci_delayed_event_info {
	struct vmci_subscription *sub;
	uint8_t event_payload[sizeof(struct vmci_event_data_max)];
};

struct vmci_event_ref {
	struct vmci_subscription	*sub;
	vmci_list_item(vmci_event_ref)	list_item;
};

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_init --
 *
 *     General init code.
 *
 * Results:
 *     VMCI_SUCCESS on success, appropriate error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		vmci_list_init(&subscriber_array[i]);

	return (vmci_init_lock(&subscriber_lock, "VMCI Event subscriber lock"));
}
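
/*
 * Note: vmci_event_init() must run before the subscription and dispatch
 * functions below, since they rely on subscriber_array and subscriber_lock
 * having been initialized.
 */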

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_exit --
 *
 *     General exit code.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_event_exit(void)
{
	struct vmci_subscription *iter, *iter_2;
	vmci_event_type e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		vmci_list_scan_safe(iter, &subscriber_array[e],
		    subscriber_list_item, iter_2) {
			/*
			 * We should never get here because all events should
			 * have been unregistered before we try to unload the
			 * driver module. Also, delayed callbacks could still
			 * be firing, so this cleanup would not be safe.
			 * Still, it is better to free the memory than not, so
			 * we leave this code in just in case.
			 */
			ASSERT(false);

			vmci_free_kernel_mem(iter, sizeof(*iter));
		}
	}
	vmci_cleanup_lock(&subscriber_lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_sync --
 *
 *     Use this as a synchronization point when setting globals, for example,
 *     during device shutdown.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_event_sync(void)
{

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_release_lock_bh(&subscriber_lock);
}
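
/*
 * A minimal usage sketch (hypothetical; the global below is illustrative and
 * not part of this driver): because vmci_event_sync() cannot return while
 * another thread is inside a subscriber_lock critical section, a shutdown
 * path can set a global and then use the call as a synchronization point:
 *
 *	vmci_device_gone = true;
 *	vmci_event_sync();
 *	// subscriber-list critical sections that started earlier are done
 */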

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_check_host_capabilities --
 *
 *     Verify that the host supports the hypercalls we need. If it does not,
 *     try to find fallback hypercalls and use those instead.
 *
 * Results:
 *     true if required hypercalls (or fallback hypercalls) are
 *     supported by the host, false otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_event_check_host_capabilities(void)
{

	/* vmci_event does not require any hypercalls. */
	return (true);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_get --
 *
 *     Gets a reference to the given struct vmci_subscription.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     The entry's reference count is incremented.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_get(struct vmci_subscription *entry)
{

	ASSERT(entry);

	entry->ref_count++;
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_release --
 *
 *     Releases the given struct vmci_subscription.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Fires the destroy event if the reference count has gone to zero.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_release(struct vmci_subscription *entry)
{

	ASSERT(entry);
	ASSERT(entry->ref_count > 0);

	entry->ref_count--;
	if (entry->ref_count == 0)
		vmci_signal_event(&entry->destroy_event);
}
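
/*
 * Lifetime note (see also vmci_event_unregister_subscription() and
 * event_release_cb()): a subscription starts with ref_count 1, and each
 * in-flight delivery takes an extra reference around its callback.
 * Unregistration removes the entry from the list and then waits on
 * destroy_event; the initial reference is dropped via event_release_cb()
 * just before blocking, so the event fires once the last delivery has
 * released its reference.
 */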

/*
 *------------------------------------------------------------------------------
 *
 * event_release_cb --
 *
 *     Callback to release the event entry reference. It is called by the
 *     vmci_wait_on_event function before it blocks.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static int
event_release_cb(void *client_data)
{
	struct vmci_subscription *sub = (struct vmci_subscription *)client_data;

	ASSERT(sub);

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_event_release(sub);
	vmci_release_lock_bh(&subscriber_lock);

	return (0);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_find --
 *
 *     Find entry. Assumes subscriber_lock is held.
 *
 * Results:
 *     Entry if found, NULL if not.
 *
 * Side effects:
 *     Increments the struct vmci_subscription refcount if an entry is found.
 *
 *------------------------------------------------------------------------------
 */

static struct vmci_subscription *
vmci_event_find(vmci_id sub_id)
{
	struct vmci_subscription *iter;
	vmci_event_type e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		vmci_list_scan(iter, &subscriber_array[e],
		    subscriber_list_item) {
			if (iter->id == sub_id) {
				vmci_event_get(iter);
				return (iter);
			}
		}
	}
	return (NULL);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_delayed_dispatch_cb --
 *
 *     Calls the specified callback in a delayed context.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_delayed_dispatch_cb(void *data)
{
	struct vmci_delayed_event_info *event_info;
	struct vmci_subscription *sub;
	struct vmci_event_data *ed;

	event_info = (struct vmci_delayed_event_info *)data;

	ASSERT(event_info);
	ASSERT(event_info->sub);

	sub = event_info->sub;
	ed = (struct vmci_event_data *)event_info->event_payload;

	sub->callback(sub->id, ed, sub->callback_data);

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_event_release(sub);
	vmci_release_lock_bh(&subscriber_lock);

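	/*
	 * event_info was allocated by vmci_event_deliver(); this delayed
	 * callback owns it and frees it once the subscription reference has
	 * been dropped.
	 */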
	vmci_free_kernel_mem(event_info, sizeof(*event_info));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_deliver --
 *
 *     Actually delivers the events to the subscribers.
 *
 * Results:
 *     VMCI_SUCCESS on success, VMCI_ERROR_NO_MEM or another error code if a
 *     delivery could not be set up.
 *
 * Side effects:
 *     The callback function for each subscriber is invoked.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *iter;
	int err = VMCI_SUCCESS;

	vmci_list(vmci_event_ref) no_delay_list;
	vmci_list_init(&no_delay_list);

	ASSERT(event_msg);

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_list_scan(iter, &subscriber_array[event_msg->event_data.event],
	    subscriber_list_item) {
		if (iter->run_delayed) {
			struct vmci_delayed_event_info *event_info;
			if ((event_info =
			    vmci_alloc_kernel_mem(sizeof(*event_info),
			    VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;
				goto out;
			}

			vmci_event_get(iter);

			memset(event_info, 0, sizeof(*event_info));
			memcpy(event_info->event_payload,
			    VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			event_info->sub = iter;
			err =
			    vmci_schedule_delayed_work(
			    vmci_event_delayed_dispatch_cb, event_info);
			if (err != VMCI_SUCCESS) {
				vmci_event_release(iter);
				vmci_free_kernel_mem(
				    event_info, sizeof(*event_info));
				goto out;
			}

		} else {
			struct vmci_event_ref *event_ref;

			/*
			 * We construct a local list of subscribers and release
			 * subscriber_lock before invoking the callbacks. This
			 * is similar to delayed callbacks, but callbacks are
			 * invoked right away here.
			 */
			if ((event_ref = vmci_alloc_kernel_mem(
			    sizeof(*event_ref), VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;
				goto out;
			}

			vmci_event_get(iter);
			event_ref->sub = iter;
			vmci_list_insert(&no_delay_list, event_ref, list_item);
		}
	}

out:
	vmci_release_lock_bh(&subscriber_lock);

	if (!vmci_list_empty(&no_delay_list)) {
		struct vmci_event_data *ed;
		struct vmci_event_ref *iter;
		struct vmci_event_ref *iter_2;

		vmci_list_scan_safe(iter, &no_delay_list, list_item, iter_2) {
			struct vmci_subscription *cur;
			uint8_t event_payload[sizeof(
			    struct vmci_event_data_max)];

			cur = iter->sub;

			/*
			 * We set event data before each callback to ensure
			 * isolation.
			 */
			memset(event_payload, 0, sizeof(event_payload));
			memcpy(event_payload, VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			ed = (struct vmci_event_data *)event_payload;
			cur->callback(cur->id, ed, cur->callback_data);

			vmci_grab_lock_bh(&subscriber_lock);
			vmci_event_release(cur);
			vmci_release_lock_bh(&subscriber_lock);
			vmci_free_kernel_mem(iter, sizeof(*iter));
		}
	}

	return (err);
}
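
/*
 * Payload layout note: the buffer passed to a callback begins with the
 * struct vmci_event_data header, and the event-specific payload follows it
 * directly (the memcpy calls above copy header and payload in one go). A
 * sketch of a callback reading the payload under that assumption (the
 * callback name and payload handling are illustrative, not part of this
 * file):
 *
 *	static void
 *	example_cb(vmci_id sub_id, struct vmci_event_data *ed, void *data)
 *	{
 *		void *payload = (void *)(ed + 1);
 *		// interpret payload based on ed->event
 *	}
 */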

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_dispatch --
 *
 *     Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 *     subscribers for the given event.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	ASSERT(msg &&
	    msg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
	    msg->dst.resource == VMCI_EVENT_HANDLER);

	if (msg->payload_size < sizeof(vmci_event_type) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return (VMCI_ERROR_INVALID_ARGS);

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return (VMCI_ERROR_EVENT_UNKNOWN);

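	/*
	 * Errors from vmci_event_deliver() (e.g. an allocation failure for
	 * one subscriber) are not propagated to the caller; once the event
	 * type has been validated, dispatch reports success.
	 */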
	vmci_event_deliver(event_msg);

	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_register_subscription --
 *
 *     Initialize and add subscription to subscriber list.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_event_register_subscription(struct vmci_subscription *sub,
    vmci_event_type event, uint32_t flags, vmci_event_cb callback,
    void *callback_data)
{
#define VMCI_EVENT_MAX_ATTEMPTS	10
	static vmci_id subscription_id = 0;
	int result;
	uint32_t attempts = 0;
	bool success;

	ASSERT(sub);

	if (!VMCI_EVENT_VALID(event) || callback == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Failed to subscribe to event"
		    " (type=%d) (callback=%p) (data=%p).\n",
		    event, callback, callback_data);
		return (VMCI_ERROR_INVALID_ARGS);
	}

	if (!vmci_can_schedule_delayed_work()) {
		/*
		 * If the platform doesn't support delayed work callbacks then
		 * don't allow registration for them.
		 */
		if (flags & VMCI_FLAG_EVENT_DELAYED_CB)
			return (VMCI_ERROR_INVALID_ARGS);
		sub->run_delayed = false;
	} else {
		/*
		 * The platform supports delayed work callbacks. Honor the
		 * requested flags.
		 */
		sub->run_delayed = (flags & VMCI_FLAG_EVENT_DELAYED_CB) ?
		    true : false;
	}

	sub->ref_count = 1;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;

	vmci_grab_lock_bh(&subscriber_lock);

	for (success = false, attempts = 0;
	    success == false && attempts < VMCI_EVENT_MAX_ATTEMPTS;
	    attempts++) {
		struct vmci_subscription *existing_sub = NULL;

		/*
		 * We try to get an id a couple of times before claiming we
		 * are out of resources.
		 */
		sub->id = ++subscription_id;

		/* Test for duplicate id. */
		existing_sub = vmci_event_find(sub->id);
		if (existing_sub == NULL) {
			/* We succeeded if we didn't find a duplicate. */
			success = true;
		} else
			vmci_event_release(existing_sub);
	}

	if (success) {
		vmci_create_event(&sub->destroy_event);
		vmci_list_insert(&subscriber_array[event], sub,
		    subscriber_list_item);
		result = VMCI_SUCCESS;
	} else
		result = VMCI_ERROR_NO_RESOURCES;

	vmci_release_lock_bh(&subscriber_lock);
	return (result);
#undef VMCI_EVENT_MAX_ATTEMPTS
}
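
/*
 * Note on id allocation: subscription ids come from a monotonically
 * increasing counter, so a duplicate can only be found once the id space
 * wraps while old subscriptions are still registered; the bounded retry
 * loop above handles that case rather than guaranteeing a free id exists.
 */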

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_unregister_subscription --
 *
 *     Remove subscription from subscriber list.
 *
 * Results:
 *     struct vmci_subscription when found, NULL otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static struct vmci_subscription *
vmci_event_unregister_subscription(vmci_id sub_id)
{
	struct vmci_subscription *s;

	if (!vmci_initialized_lock(&subscriber_lock))
		return (NULL);

	vmci_grab_lock_bh(&subscriber_lock);
	s = vmci_event_find(sub_id);
	if (s != NULL) {
		vmci_event_release(s);
		vmci_list_remove(s, subscriber_list_item);
	}
	vmci_release_lock_bh(&subscriber_lock);

	if (s != NULL) {
		vmci_wait_on_event(&s->destroy_event, event_release_cb, s);
		vmci_destroy_event(&s->destroy_event);
	}

	return (s);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_subscribe --
 *
 *     Subscribe to given event. The callback specified can be fired in
 *     different contexts depending on what flag is specified while
 *     registering. If flags contains VMCI_FLAG_EVENT_NONE then the callback
 *     is fired immediately (in BH context on the guest), after the
 *     subscriber lock has been dropped; see vmci_event_deliver(). If flags
 *     contains VMCI_FLAG_EVENT_DELAYED_CB then the callback is fired with no
 *     locks held in thread context. This is useful because other vmci_event
 *     functions can be called, but it also increases the chances that an
 *     event will be dropped. Note that this function always registers with
 *     VMCI_FLAG_EVENT_NONE.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_subscribe(vmci_event_type event, vmci_event_cb callback,
    void *callback_data, vmci_id *subscription_id)
{
	int retval;
	uint32_t flags = VMCI_FLAG_EVENT_NONE;
	struct vmci_subscription *s = NULL;

	if (subscription_id == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Invalid subscription (NULL).\n");
		return (VMCI_ERROR_INVALID_ARGS);
	}

	s = vmci_alloc_kernel_mem(sizeof(*s), VMCI_MEMORY_NORMAL);
	if (s == NULL)
		return (VMCI_ERROR_NO_MEM);

	retval = vmci_event_register_subscription(s, event, flags,
	    callback, callback_data);
	if (retval < VMCI_SUCCESS) {
		vmci_free_kernel_mem(s, sizeof(*s));
		return (retval);
	}

	*subscription_id = s->id;
	return (retval);
}
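
/*
 * A minimal usage sketch (hypothetical; the callback and the choice of event
 * are illustrative, not part of this file):
 *
 *	static void
 *	example_cb(vmci_id sub_id, struct vmci_event_data *ed, void *data)
 *	{
 *		// fired in BH context; must not block
 *	}
 *
 *	vmci_id sub_id;
 *	int rv;
 *
 *	rv = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED, example_cb, NULL,
 *	    &sub_id);
 *	if (rv < VMCI_SUCCESS)
 *		// handle error
 */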

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_unsubscribe --
 *
 *     Unsubscribe from given event. Removes the subscription from the
 *     subscriber list and frees it.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_unsubscribe(vmci_id sub_id)
{
	struct vmci_subscription *s;

	/*
	 * Return subscription. At this point we know no one else is accessing
	 * the subscription, so we can free it.
	 */
	s = vmci_event_unregister_subscription(sub_id);
	if (s == NULL)
		return (VMCI_ERROR_NOT_FOUND);
	vmci_free_kernel_mem(s, sizeof(*s));

	return (VMCI_SUCCESS);
}
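
/*
 * Teardown sketch (hypothetical usage): vmci_event_unregister_subscription()
 * waits on destroy_event, so by the time vmci_event_unsubscribe() returns no
 * callback for this subscription is running or can fire again, and resources
 * the callback used may be released:
 *
 *	if (vmci_event_unsubscribe(sub_id) == VMCI_SUCCESS)
 *		// safe to free callback_data now
 */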
697