// xref: /linux/drivers/misc/vmw_vmci/vmci_event.c (revision c5288cda69ee2d8607f5026bd599a5cebf0ee783)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * VMware VMCI Driver
4  *
5  * Copyright (C) 2012 VMware, Inc. All rights reserved.
6  */
7 
8 #include <linux/vmw_vmci_defs.h>
9 #include <linux/vmw_vmci_api.h>
10 #include <linux/list.h>
11 #include <linux/module.h>
12 #include <linux/nospec.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/rculist.h>
16 
17 #include "vmci_driver.h"
18 #include "vmci_event.h"
19 
20 #define EVENT_MAGIC 0xEABE0000
21 #define VMCI_EVENT_MAX_ATTEMPTS 10
22 
/*
 * One registered subscriber for a single VMCI event type.
 * Lifetime: allocated in vmci_event_subscribe(), freed in
 * vmci_event_unsubscribe() (RCU-deferred) or vmci_event_exit().
 */
struct vmci_subscription {
	u32 id;			/* unique subscription id handed back to the caller */
	u32 event;		/* VMCI event type subscribed to (index into subscriber_array) */
	vmci_event_cb callback;	/* invoked from RCU read-side context; must not sleep */
	void *callback_data;	/* opaque pointer passed through to callback */
	struct list_head node;	/* on one of subscriber lists */
};
30 
/*
 * One subscriber list per VMCI event type.  Writers (subscribe,
 * unsubscribe, exit) serialize on subscriber_mutex; readers
 * (event_deliver) traverse the lists under rcu_read_lock().
 */
static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);
33 
34 int __init vmci_event_init(void)
35 {
36 	int i;
37 
38 	for (i = 0; i < VMCI_EVENT_MAX; i++)
39 		INIT_LIST_HEAD(&subscriber_array[i]);
40 
41 	return VMCI_SUCCESS;
42 }
43 
44 void vmci_event_exit(void)
45 {
46 	int e;
47 
48 	/* We free all memory at exit. */
49 	for (e = 0; e < VMCI_EVENT_MAX; e++) {
50 		struct vmci_subscription *cur, *p2;
51 		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
52 
53 			/*
54 			 * We should never get here because all events
55 			 * should have been unregistered before we try
56 			 * to unload the driver module.
57 			 */
58 			pr_warn("Unexpected free events occurring\n");
59 			list_del(&cur->node);
60 			kfree(cur);
61 		}
62 	}
63 }
64 
65 /*
66  * Find entry. Assumes subscriber_mutex is held.
67  */
68 static struct vmci_subscription *event_find(u32 sub_id)
69 {
70 	int e;
71 
72 	for (e = 0; e < VMCI_EVENT_MAX; e++) {
73 		struct vmci_subscription *cur;
74 		list_for_each_entry(cur, &subscriber_array[e], node) {
75 			if (cur->id == sub_id)
76 				return cur;
77 		}
78 	}
79 	return NULL;
80 }
81 
82 /*
83  * Actually delivers the events to the subscribers.
84  * The callback function for each subscriber is invoked.
85  */
86 static void event_deliver(struct vmci_event_msg *event_msg)
87 {
88 	struct vmci_subscription *cur;
89 	struct list_head *subscriber_list;
90 	u32 sanitized_event, max_vmci_event;
91 
92 	rcu_read_lock();
93 	max_vmci_event = ARRAY_SIZE(subscriber_array);
94 	sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event);
95 	subscriber_list = &subscriber_array[sanitized_event];
96 	list_for_each_entry_rcu(cur, subscriber_list, node) {
97 		cur->callback(cur->id, &event_msg->event_data,
98 			      cur->callback_data);
99 	}
100 	rcu_read_unlock();
101 }
102 
103 /*
104  * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
105  * subscribers for given event.
106  */
/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for given event.
 *
 * Returns VMCI_SUCCESS, VMCI_ERROR_INVALID_ARGS for a malformed
 * payload size, or VMCI_ERROR_EVENT_UNKNOWN for an out-of-range
 * event number.  The size check runs first, so a datagram failing
 * both yields VMCI_ERROR_INVALID_ARGS.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	/*
	 * Payload must hold at least the u32 event number and at most
	 * the largest defined event payload; reject anything else
	 * before touching event_data.
	 */
	if (msg->payload_size < sizeof(u32) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return VMCI_ERROR_INVALID_ARGS;

	/* Bounds-check the event number before it indexes subscriber_array. */
	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return VMCI_ERROR_EVENT_UNKNOWN;

	event_deliver(event_msg);
	return VMCI_SUCCESS;
}
121 
122 /*
123  * vmci_event_subscribe() - Subscribe to a given event.
124  * @event:      The event to subscribe to.
125  * @callback:   The callback to invoke upon the event.
126  * @callback_data:      Data to pass to the callback.
127  * @subscription_id:    ID used to track subscription.  Used with
128  *              vmci_event_unsubscribe()
129  *
130  * Subscribes to the provided event. The callback specified will be
131  * fired from RCU critical section and therefore must not sleep.
132  */
133 int vmci_event_subscribe(u32 event,
134 			 vmci_event_cb callback,
135 			 void *callback_data,
136 			 u32 *new_subscription_id)
137 {
138 	struct vmci_subscription *sub;
139 	int attempts;
140 	int retval;
141 	bool have_new_id = false;
142 
143 	if (!new_subscription_id) {
144 		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
145 		return VMCI_ERROR_INVALID_ARGS;
146 	}
147 
148 	if (!VMCI_EVENT_VALID(event) || !callback) {
149 		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
150 			 __func__, event, callback, callback_data);
151 		return VMCI_ERROR_INVALID_ARGS;
152 	}
153 
154 	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
155 	if (!sub)
156 		return VMCI_ERROR_NO_MEM;
157 
158 	sub->id = VMCI_EVENT_MAX;
159 	sub->event = event;
160 	sub->callback = callback;
161 	sub->callback_data = callback_data;
162 	INIT_LIST_HEAD(&sub->node);
163 
164 	mutex_lock(&subscriber_mutex);
165 
166 	/* Creation of a new event is always allowed. */
167 	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
168 		static u32 subscription_id;
169 		/*
170 		 * We try to get an id a couple of time before
171 		 * claiming we are out of resources.
172 		 */
173 
174 		/* Test for duplicate id. */
175 		if (!event_find(++subscription_id)) {
176 			sub->id = subscription_id;
177 			have_new_id = true;
178 			break;
179 		}
180 	}
181 
182 	if (have_new_id) {
183 		list_add_rcu(&sub->node, &subscriber_array[event]);
184 		retval = VMCI_SUCCESS;
185 	} else {
186 		retval = VMCI_ERROR_NO_RESOURCES;
187 	}
188 
189 	mutex_unlock(&subscriber_mutex);
190 
191 	*new_subscription_id = sub->id;
192 	return retval;
193 }
194 EXPORT_SYMBOL_GPL(vmci_event_subscribe);
195 
196 /*
197  * vmci_event_unsubscribe() - unsubscribe from an event.
198  * @sub_id:     A subscription ID as provided by vmci_event_subscribe()
199  *
200  * Unsubscribe from given event. Removes it from list and frees it.
201  * Will return callback_data if requested by caller.
202  */
203 int vmci_event_unsubscribe(u32 sub_id)
204 {
205 	struct vmci_subscription *s;
206 
207 	mutex_lock(&subscriber_mutex);
208 	s = event_find(sub_id);
209 	if (s)
210 		list_del_rcu(&s->node);
211 	mutex_unlock(&subscriber_mutex);
212 
213 	if (!s)
214 		return VMCI_ERROR_NOT_FOUND;
215 
216 	kvfree_rcu_mightsleep(s);
217 
218 	return VMCI_SUCCESS;
219 }
220 EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
221