xref: /linux/drivers/virt/vboxguest/vboxguest_core.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 /* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2 /*
3  * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
4  *
5  * Copyright (C) 2007-2016 Oracle Corporation
6  */
7 
8 #include <linux/device.h>
9 #include <linux/io.h>
10 #include <linux/mm.h>
11 #include <linux/sched.h>
12 #include <linux/sizes.h>
13 #include <linux/slab.h>
14 #include <linux/vbox_err.h>
15 #include <linux/vbox_utils.h>
16 #include <linux/vmalloc.h>
17 #include "vboxguest_core.h"
18 #include "vboxguest_version.h"
19 
20 /* Get the pointer to the first HGCM parameter. */
21 #define VBG_IOCTL_HGCM_CALL_PARMS(a) \
22 	((struct vmmdev_hgcm_function_parameter *)( \
23 		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
24 /* Get the pointer to the first HGCM parameter in a 32-bit request. */
25 #define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
26 	((struct vmmdev_hgcm_function_parameter32 *)( \
27 		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
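/*
 * Layout assumed by the two macros above: the variable-length array of
 * HGCM function parameters sits directly behind the fixed-size
 * struct vbg_ioctl_hgcm_call header in the same buffer:
 *
 *	+--------------------------------+  <- (u8 *)(a)
 *	| struct vbg_ioctl_hgcm_call     |
 *	+--------------------------------+  <- first parameter
 *	| parm[0] ... parm[parm_count-1] |
 *	+--------------------------------+
 *
 * vbg_ioctl_hgcm_call() below checks that hdr.size_in covers both parts
 * before the parameter array is dereferenced.
 */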
28 
29 #define GUEST_MAPPINGS_TRIES	5
30 
31 #define VBG_KERNEL_REQUEST \
32 	(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
33 	 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
34 
35 /**
36  * vbg_guest_mappings_init - Reserves memory in which the VMM can
37  *	relocate any guest mappings that are floating around.
38  * @gdev:		The Guest extension device.
39  *
40  * This operation is a little bit tricky since the VMM might not accept
41  * just any address because of address clashes between the three contexts
42  * it operates in, so we try several times.
43  *
44  * Failure to reserve the guest mappings is ignored.
45  */
46 static void vbg_guest_mappings_init(struct vbg_dev *gdev)
47 {
48 	struct vmmdev_hypervisorinfo *req;
49 	void *guest_mappings[GUEST_MAPPINGS_TRIES];
50 	struct page **pages = NULL;
51 	u32 size, hypervisor_size;
52 	int i, rc;
53 
54 	/* Query the required space. */
55 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
56 			    VBG_KERNEL_REQUEST);
57 	if (!req)
58 		return;
59 
60 	req->hypervisor_start = 0;
61 	req->hypervisor_size = 0;
62 	rc = vbg_req_perform(gdev, req);
63 	if (rc < 0)
64 		goto out;
65 
66 	/*
67 	 * The VMM will report back if there is nothing it wants to map, like
68 	 * for instance in VT-x and AMD-V mode.
69 	 */
70 	if (req->hypervisor_size == 0)
71 		goto out;
72 
73 	hypervisor_size = req->hypervisor_size;
74 	/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
75 	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
76 
77 	pages = kmalloc_objs(*pages, size >> PAGE_SHIFT, GFP_KERNEL);
78 	if (!pages)
79 		goto out;
80 
81 	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
82 	if (!gdev->guest_mappings_dummy_page)
83 		goto out;
84 
85 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
86 		pages[i] = gdev->guest_mappings_dummy_page;
87 
88 	/*
89 	 * Try several times; the VMM might not accept some addresses because
90 	 * of address clashes between the three contexts.
91 	 */
92 	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
93 		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
94 					 VM_MAP, PAGE_KERNEL_RO);
95 		if (!guest_mappings[i])
96 			break;
97 
98 		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
99 		req->header.rc = VERR_INTERNAL_ERROR;
100 		req->hypervisor_size = hypervisor_size;
101 		req->hypervisor_start =
102 			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
103 
104 		rc = vbg_req_perform(gdev, req);
105 		if (rc >= 0) {
106 			gdev->guest_mappings = guest_mappings[i];
107 			break;
108 		}
109 	}
110 
111 	/* Free vmap's from failed attempts. */
112 	while (--i >= 0)
113 		vunmap(guest_mappings[i]);
114 
115 	/* On failure free the dummy-page backing the vmap */
116 	if (!gdev->guest_mappings) {
117 		__free_page(gdev->guest_mappings_dummy_page);
118 		gdev->guest_mappings_dummy_page = NULL;
119 	}
120 
121 out:
122 	vbg_req_free(req, sizeof(*req));
123 	kfree(pages);
124 }
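/*
 * Worked sizing example (assuming 4 KiB pages): if the host reports a
 * hypervisor_size of 0x601000, PAGE_ALIGN() leaves it unchanged and the
 * vmap spans 0xa01000 bytes.  PTR_ALIGN(addr, SZ_4M) advances at most
 * SZ_4M - PAGE_SIZE into the mapping, so at least hypervisor_size bytes
 * remain after the aligned start and a 4MiB-aligned window can always
 * be offered to the host.
 */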
125 
126 /**
127  * vbg_guest_mappings_exit - Undo what vbg_guest_mappings_init did.
128  *
129  * @gdev:		The Guest extension device.
130  */
131 static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
132 {
133 	struct vmmdev_hypervisorinfo *req;
134 	int rc;
135 
136 	if (!gdev->guest_mappings)
137 		return;
138 
139 	/*
140 	 * Tell the host that we're going to free the memory we reserved for
141 	 * it, then free it up. (Leak the memory if anything goes wrong here.)
142 	 */
143 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
144 			    VBG_KERNEL_REQUEST);
145 	if (!req)
146 		return;
147 
148 	req->hypervisor_start = 0;
149 	req->hypervisor_size = 0;
150 
151 	rc = vbg_req_perform(gdev, req);
152 
153 	vbg_req_free(req, sizeof(*req));
154 
155 	if (rc < 0) {
156 		vbg_err("%s error: %d\n", __func__, rc);
157 		return;
158 	}
159 
160 	vunmap(gdev->guest_mappings);
161 	gdev->guest_mappings = NULL;
162 
163 	__free_page(gdev->guest_mappings_dummy_page);
164 	gdev->guest_mappings_dummy_page = NULL;
165 }
166 
167 /**
168  * vbg_report_guest_info - Report the guest information to the host.
169  * @gdev:		The Guest extension device.
170  *
171  * Return: %0 or negative errno value.
172  */
173 static int vbg_report_guest_info(struct vbg_dev *gdev)
174 {
175 	/*
176 	 * Allocate and fill in the two guest info reports.
177 	 */
178 	struct vmmdev_guest_info *req1 = NULL;
179 	struct vmmdev_guest_info2 *req2 = NULL;
180 	int rc, ret = -ENOMEM;
181 
182 	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
183 			     VBG_KERNEL_REQUEST);
184 	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
185 			     VBG_KERNEL_REQUEST);
186 	if (!req1 || !req2)
187 		goto out_free;
188 
189 	req1->interface_version = VMMDEV_VERSION;
190 	req1->os_type = VMMDEV_OSTYPE_LINUX26;
191 #if __BITS_PER_LONG == 64
192 	req1->os_type |= VMMDEV_OSTYPE_X64;
193 #endif
194 
195 	req2->additions_major = VBG_VERSION_MAJOR;
196 	req2->additions_minor = VBG_VERSION_MINOR;
197 	req2->additions_build = VBG_VERSION_BUILD;
198 	req2->additions_revision = VBG_SVN_REV;
199 	req2->additions_features =
200 		VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
201 	strscpy(req2->name, VBG_VERSION_STRING,
202 		sizeof(req2->name));
203 
204 	/*
205 	 * There are two protocols here:
206 	 *      1. INFO2 + INFO1. Supported by >=3.2.51.
207 	 *      2. INFO1 and optionally INFO2. The old protocol.
208 	 *
209 	 * We try protocol 1 first.  It will fail with VERR_NOT_SUPPORTED
210 	 * if not supported by the VMMDev (message ordering requirement).
211 	 */
212 	rc = vbg_req_perform(gdev, req2);
213 	if (rc >= 0) {
214 		rc = vbg_req_perform(gdev, req1);
215 	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
216 		rc = vbg_req_perform(gdev, req1);
217 		if (rc >= 0) {
218 			rc = vbg_req_perform(gdev, req2);
219 			if (rc == VERR_NOT_IMPLEMENTED)
220 				rc = VINF_SUCCESS;
221 		}
222 	}
223 	ret = vbg_status_code_to_errno(rc);
224 
225 out_free:
226 	vbg_req_free(req2, sizeof(*req2));
227 	vbg_req_free(req1, sizeof(*req1));
228 	return ret;
229 }
230 
231 /**
232  * vbg_report_driver_status - Report the guest driver status to the host.
233  * @gdev:		The Guest extension device.
234  * @active:		Flag whether the driver is now active or not.
235  *
236  * Return: %0 or negative errno value.
237  */
238 static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
239 {
240 	struct vmmdev_guest_status *req;
241 	int rc;
242 
243 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
244 			    VBG_KERNEL_REQUEST);
245 	if (!req)
246 		return -ENOMEM;
247 
248 	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
249 	if (active)
250 		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
251 	else
252 		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
253 	req->flags = 0;
254 
255 	rc = vbg_req_perform(gdev, req);
256 	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
257 		rc = VINF_SUCCESS;
258 
259 	vbg_req_free(req, sizeof(*req));
260 
261 	return vbg_status_code_to_errno(rc);
262 }
263 
264 /**
265  * vbg_balloon_inflate - Inflate the balloon by one chunk. The caller
266  * owns the balloon mutex.
267  * @gdev:		The Guest extension device.
268  * @chunk_idx:		Index of the chunk.
269  *
270  * Return: %0 or negative errno value.
271  */
272 static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
273 {
274 	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
275 	struct page **pages;
276 	int i, rc, ret;
277 
278 	pages = kmalloc_objs(*pages, VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
279 			     GFP_KERNEL | __GFP_NOWARN);
280 	if (!pages)
281 		return -ENOMEM;
282 
283 	req->header.size = sizeof(*req);
284 	req->inflate = true;
285 	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
286 
287 	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
288 		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
289 		if (!pages[i]) {
290 			ret = -ENOMEM;
291 			goto out_error;
292 		}
293 
294 		req->phys_page[i] = page_to_phys(pages[i]);
295 	}
296 
297 	rc = vbg_req_perform(gdev, req);
298 	if (rc < 0) {
299 		vbg_err("%s error, rc: %d\n", __func__, rc);
300 		ret = vbg_status_code_to_errno(rc);
301 		goto out_error;
302 	}
303 
304 	gdev->mem_balloon.pages[chunk_idx] = pages;
305 
306 	return 0;
307 
308 out_error:
309 	while (--i >= 0)
310 		__free_page(pages[i]);
311 	kfree(pages);
312 
313 	return ret;
314 }
315 
316 /**
317  * vbg_balloon_deflate - Deflate the balloon by one chunk. The caller
318  * owns the balloon mutex.
319  * @gdev:		The Guest extension device.
320  * @chunk_idx:		Index of the chunk.
321  *
322  * Return: %0 or negative errno value.
323  */
324 static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
325 {
326 	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
327 	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
328 	int i, rc;
329 
330 	req->header.size = sizeof(*req);
331 	req->inflate = false;
332 	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
333 
334 	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
335 		req->phys_page[i] = page_to_phys(pages[i]);
336 
337 	rc = vbg_req_perform(gdev, req);
338 	if (rc < 0) {
339 		vbg_err("%s error, rc: %d\n", __func__, rc);
340 		return vbg_status_code_to_errno(rc);
341 	}
342 
343 	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
344 		__free_page(pages[i]);
345 	kfree(pages);
346 	gdev->mem_balloon.pages[chunk_idx] = NULL;
347 
348 	return 0;
349 }
350 
351 /*
352  * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
353  * the host wants the balloon to be and adjust accordingly.
354  */
355 static void vbg_balloon_work(struct work_struct *work)
356 {
357 	struct vbg_dev *gdev =
358 		container_of(work, struct vbg_dev, mem_balloon.work);
359 	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
360 	u32 i, chunks;
361 	int rc, ret;
362 
363 	/*
364 	 * Setting this bit means that we request the value from the host and
365 	 * change the guest memory balloon according to the returned value.
366 	 */
367 	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
368 	rc = vbg_req_perform(gdev, req);
369 	if (rc < 0) {
370 		vbg_err("%s error, rc: %d\n", __func__, rc);
371 		return;
372 	}
373 
374 	/*
375 	 * The host always returns the same maximum number of chunks, so
376 	 * we only do this once.
377 	 */
378 	if (!gdev->mem_balloon.max_chunks) {
379 		gdev->mem_balloon.pages =
380 			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
381 				     sizeof(struct page **), GFP_KERNEL);
382 		if (!gdev->mem_balloon.pages)
383 			return;
384 
385 		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
386 	}
387 
388 	chunks = req->balloon_chunks;
389 	if (chunks > gdev->mem_balloon.max_chunks) {
390 		vbg_err("%s: illegal balloon size %u (max=%u)\n",
391 			__func__, chunks, gdev->mem_balloon.max_chunks);
392 		return;
393 	}
394 
395 	if (chunks > gdev->mem_balloon.chunks) {
396 		/* inflate */
397 		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
398 			ret = vbg_balloon_inflate(gdev, i);
399 			if (ret < 0)
400 				return;
401 
402 			gdev->mem_balloon.chunks++;
403 		}
404 	} else {
405 		/* deflate */
406 		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
407 			ret = vbg_balloon_deflate(gdev, i);
408 			if (ret < 0)
409 				return;
410 
411 			gdev->mem_balloon.chunks--;
412 		}
413 	}
414 }
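/*
 * Example of the chunk accounting above, assuming the usual 1 MiB chunk
 * size (VMMDEV_MEMORY_BALLOON_CHUNK_PAGES pages of 4 KiB): if the host
 * asks for balloon_chunks = 32 while mem_balloon.chunks is 8, the
 * inflate loop handles chunks 8..31 one vbg_balloon_inflate() call at a
 * time, bumping mem_balloon.chunks only after each success, so bailing
 * out half-way leaves the bookkeeping consistent with what the host was
 * actually given.
 */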
415 
416 /*
417  * Callback for heartbeat timer.
418  */
419 static void vbg_heartbeat_timer(struct timer_list *t)
420 {
421 	struct vbg_dev *gdev = timer_container_of(gdev, t, heartbeat_timer);
422 
423 	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
424 	mod_timer(&gdev->heartbeat_timer,
425 		  msecs_to_jiffies(gdev->heartbeat_interval_ms));
426 }
427 
428 /**
429  * vbg_heartbeat_host_config - Configure the host to check guest's heartbeat
430  *	and get heartbeat interval from the host.
431  * @gdev:		The Guest extension device.
432  * @enabled:		Set true to enable guest heartbeat checks on host.
433  *
434  * Return: %0 or negative errno value.
435  */
436 static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
437 {
438 	struct vmmdev_heartbeat *req;
439 	int rc;
440 
441 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
442 			    VBG_KERNEL_REQUEST);
443 	if (!req)
444 		return -ENOMEM;
445 
446 	req->enabled = enabled;
447 	req->interval_ns = 0;
448 	rc = vbg_req_perform(gdev, req);
449 	do_div(req->interval_ns, 1000000); /* ns -> ms */
450 	gdev->heartbeat_interval_ms = req->interval_ns;
451 	vbg_req_free(req, sizeof(*req));
452 
453 	return vbg_status_code_to_errno(rc);
454 }
455 
456 /**
457  * vbg_heartbeat_init - Initializes the heartbeat timer. This feature
458  * may be disabled by the host.
459  * @gdev:		The Guest extension device.
460  *
461  * Return: %0 or negative errno value.
462  */
463 static int vbg_heartbeat_init(struct vbg_dev *gdev)
464 {
465 	int ret;
466 
467 	/* Make sure that heartbeat checking is disabled if we fail. */
468 	ret = vbg_heartbeat_host_config(gdev, false);
469 	if (ret < 0)
470 		return ret;
471 
472 	ret = vbg_heartbeat_host_config(gdev, true);
473 	if (ret < 0)
474 		return ret;
475 
476 	gdev->guest_heartbeat_req = vbg_req_alloc(
477 					sizeof(*gdev->guest_heartbeat_req),
478 					VMMDEVREQ_GUEST_HEARTBEAT,
479 					VBG_KERNEL_REQUEST);
480 	if (!gdev->guest_heartbeat_req)
481 		return -ENOMEM;
482 
483 	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
484 		 __func__, gdev->heartbeat_interval_ms);
485 	mod_timer(&gdev->heartbeat_timer, 0);
486 
487 	return 0;
488 }
489 
490 /**
491  * vbg_heartbeat_exit - Cleanup heartbeat code, stop HB timer and disable
492  *	host heartbeat checking.
493  * @gdev:		The Guest extension device.
494  */
495 static void vbg_heartbeat_exit(struct vbg_dev *gdev)
496 {
497 	timer_delete_sync(&gdev->heartbeat_timer);
498 	vbg_heartbeat_host_config(gdev, false);
499 	vbg_req_free(gdev->guest_heartbeat_req,
500 		     sizeof(*gdev->guest_heartbeat_req));
501 }
502 
503 /**
504  * vbg_track_bit_usage - Applies a change to the bit usage tracker.
505  * @tracker:		The bit usage tracker.
506  * @changed:		The bits to change.
507  * @previous:		The previous value of the bits.
508  *
509  * Return: %true if the mask changed, %false if not.
510  */
511 static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
512 				u32 changed, u32 previous)
513 {
514 	bool global_change = false;
515 
516 	while (changed) {
517 		u32 bit = ffs(changed) - 1;
518 		u32 bitmask = BIT(bit);
519 
520 		if (bitmask & previous) {
521 			tracker->per_bit_usage[bit] -= 1;
522 			if (tracker->per_bit_usage[bit] == 0) {
523 				global_change = true;
524 				tracker->mask &= ~bitmask;
525 			}
526 		} else {
527 			tracker->per_bit_usage[bit] += 1;
528 			if (tracker->per_bit_usage[bit] == 1) {
529 				global_change = true;
530 				tracker->mask |= bitmask;
531 			}
532 		}
533 
534 		changed &= ~bitmask;
535 	}
536 
537 	return global_change;
538 }
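/*
 * Example: if two sessions have the same event bit set, per_bit_usage for
 * that bit is 2.  The first session dropping the bit only decrements the
 * counter, with no global change; the second drop takes it to 0, clears
 * the bit in tracker->mask and returns true, signalling the caller that
 * the combined mask needs to be re-reported to the host.
 */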
539 
540 /**
541  * vbg_reset_host_event_filter - Init and termination worker for
542  *	resetting the event filter on the host.
543  * @gdev:		   The Guest extension device.
544  * @fixed_events:	   Fixed events (init time).
545  *
546  * Return: %0 or negative errno value.
547  */
548 static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
549 				       u32 fixed_events)
550 {
551 	struct vmmdev_mask *req;
552 	int rc;
553 
554 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
555 			    VBG_KERNEL_REQUEST);
556 	if (!req)
557 		return -ENOMEM;
558 
559 	req->not_mask = U32_MAX & ~fixed_events;
560 	req->or_mask = fixed_events;
561 	rc = vbg_req_perform(gdev, req);
562 	if (rc < 0)
563 		vbg_err("%s error, rc: %d\n", __func__, rc);
564 
565 	vbg_req_free(req, sizeof(*req));
566 	return vbg_status_code_to_errno(rc);
567 }
568 
569 /**
570  * vbg_set_session_event_filter - Changes the event filter mask for the
571  *	given session.
572  * @gdev:			The Guest extension device.
573  * @session:			The session.
574  * @or_mask:			The events to add.
575  * @not_mask:			The events to remove.
576  * @session_termination:	Set if we're called by the session cleanup code.
577  *				This tweaks the error handling so we perform
578  *				proper session cleanup even if the host
579  *				misbehaves.
580  *
581  * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
582  * do session cleanup. Takes the session mutex.
583  *
584  * Return: %0 or negative errno value.
585  */
586 static int vbg_set_session_event_filter(struct vbg_dev *gdev,
587 					struct vbg_session *session,
588 					u32 or_mask, u32 not_mask,
589 					bool session_termination)
590 {
591 	struct vmmdev_mask *req;
592 	u32 changed, previous;
593 	int rc, ret = 0;
594 
595 	/*
596 	 * Allocate a request buffer before taking the session mutex; when
597 	 * the session is being terminated the requestor is the kernel,
598 	 * as we're cleaning up.
599 	 */
600 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
601 			    session_termination ? VBG_KERNEL_REQUEST :
602 						  session->requestor);
603 	if (!req) {
604 		if (!session_termination)
605 			return -ENOMEM;
606 		/* Ignore allocation failure, we must do session cleanup. */
607 	}
608 
609 	mutex_lock(&gdev->session_mutex);
610 
611 	/* Apply the changes to the session mask. */
612 	previous = session->event_filter;
613 	session->event_filter |= or_mask;
614 	session->event_filter &= ~not_mask;
615 
616 	/* If anything actually changed, update the global usage counters. */
617 	changed = previous ^ session->event_filter;
618 	if (!changed)
619 		goto out;
620 
621 	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
622 	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
623 
624 	if (gdev->event_filter_host == or_mask || !req)
625 		goto out;
626 
627 	gdev->event_filter_host = or_mask;
628 	req->or_mask = or_mask;
629 	req->not_mask = ~or_mask;
630 	rc = vbg_req_perform(gdev, req);
631 	if (rc < 0) {
632 		ret = vbg_status_code_to_errno(rc);
633 
634 		/* Failed, roll back (unless it's session termination time). */
635 		gdev->event_filter_host = U32_MAX;
636 		if (session_termination)
637 			goto out;
638 
639 		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
640 				    session->event_filter);
641 		session->event_filter = previous;
642 	}
643 
644 out:
645 	mutex_unlock(&gdev->session_mutex);
646 	vbg_req_free(req, sizeof(*req));
647 
648 	return ret;
649 }
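/*
 * Example: a session enabling VMMDEV_EVENT_MOUSE_POSITION_CHANGED via
 * or_mask first flips the bit in its own event_filter; the host-side
 * VMMDEVREQ_CTL_GUEST_FILTER_MASK request is only issued when the
 * combined tracker mask (plus fixed_events) differs from
 * event_filter_host, and on failure both the tracker and the session
 * filter are rolled back, unless the session is being torn down anyway.
 */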
650 
651 /**
652  * vbg_reset_host_capabilities - Init and termination worker for setting
653  *	the guest capabilities to zero on the host.
654  * @gdev:		The Guest extension device.
655  *
656  * Return: %0 or negative errno value.
657  */
658 static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
659 {
660 	struct vmmdev_mask *req;
661 	int rc;
662 
663 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
664 			    VBG_KERNEL_REQUEST);
665 	if (!req)
666 		return -ENOMEM;
667 
668 	req->not_mask = U32_MAX;
669 	req->or_mask = 0;
670 	rc = vbg_req_perform(gdev, req);
671 	if (rc < 0)
672 		vbg_err("%s error, rc: %d\n", __func__, rc);
673 
674 	vbg_req_free(req, sizeof(*req));
675 	return vbg_status_code_to_errno(rc);
676 }
677 
678 /**
679  * vbg_set_host_capabilities - Set guest capabilities on the host.
680  * @gdev:			The Guest extension device.
681  * @session:			The session.
682  * @session_termination:	Set if we're called by the session cleanup code.
683  *
684  * Must be called with gdev->session_mutex held.
685  *
686  * Return: %0 or negative errno value.
687  */
688 static int vbg_set_host_capabilities(struct vbg_dev *gdev,
689 				     struct vbg_session *session,
690 				     bool session_termination)
691 {
692 	struct vmmdev_mask *req;
693 	u32 caps;
694 	int rc;
695 
696 	WARN_ON(!mutex_is_locked(&gdev->session_mutex));
697 
698 	caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask;
699 
700 	if (gdev->guest_caps_host == caps)
701 		return 0;
702 
703 	/* On termination the requestor is the kernel, as we're cleaning up. */
704 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
705 			    session_termination ? VBG_KERNEL_REQUEST :
706 						  session->requestor);
707 	if (!req) {
708 		gdev->guest_caps_host = U32_MAX;
709 		return -ENOMEM;
710 	}
711 
712 	req->or_mask = caps;
713 	req->not_mask = ~caps;
714 	rc = vbg_req_perform(gdev, req);
715 	vbg_req_free(req, sizeof(*req));
716 
717 	gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX;
718 
719 	return vbg_status_code_to_errno(rc);
720 }
721 
722 /**
723  * vbg_acquire_session_capabilities - Acquire (get exclusive access to)
724  *	guest capabilities for a session.
725  * @gdev:			The Guest extension device.
726  * @session:			The session.
727  * @flags:			Flags (VBGL_IOC_AGC_FLAGS_XXX).
728  * @or_mask:			The capabilities to add.
729  * @not_mask:			The capabilities to remove.
730  * @session_termination:	Set if we're called by the session cleanup code.
731  *				This tweaks the error handling so we perform
732  *				proper session cleanup even if the host
733  *				misbehaves.
734  *
735  * Takes the session mutex.
736  *
737  * Return: %0 or negative errno value.
738  */
739 static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
740 					    struct vbg_session *session,
741 					    u32 or_mask, u32 not_mask,
742 					    u32 flags, bool session_termination)
743 {
744 	unsigned long irqflags;
745 	bool wakeup = false;
746 	int ret = 0;
747 
748 	mutex_lock(&gdev->session_mutex);
749 
750 	if (gdev->set_guest_caps_tracker.mask & or_mask) {
751 		vbg_err("%s error: cannot acquire caps which are currently set\n",
752 			__func__);
753 		ret = -EINVAL;
754 		goto out;
755 	}
756 
757 	/*
758 	 * Mark any caps in the or_mask as now being in acquire-mode. Note
759 	 * once caps are in acquire_mode they always stay in this mode.
760 	 * This impacts event handling, so we take the event-lock.
761 	 */
762 	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
763 	gdev->acquire_mode_guest_caps |= or_mask;
764 	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
765 
766 	/* If we only have to switch the caps to acquire mode, we're done. */
767 	if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
768 		goto out;
769 
770 	not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
771 	not_mask &= session->acquired_guest_caps;
772 	or_mask &= ~session->acquired_guest_caps;
773 
774 	if (or_mask == 0 && not_mask == 0)
775 		goto out;
776 
777 	if (gdev->acquired_guest_caps & or_mask) {
778 		ret = -EBUSY;
779 		goto out;
780 	}
781 
782 	gdev->acquired_guest_caps |= or_mask;
783 	gdev->acquired_guest_caps &= ~not_mask;
784 	/* session->acquired_guest_caps impacts event handling, take the lock */
785 	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
786 	session->acquired_guest_caps |= or_mask;
787 	session->acquired_guest_caps &= ~not_mask;
788 	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
789 
790 	ret = vbg_set_host_capabilities(gdev, session, session_termination);
791 	/* Roll back on failure, unless it's session termination time. */
792 	if (ret < 0 && !session_termination) {
793 		gdev->acquired_guest_caps &= ~or_mask;
794 		gdev->acquired_guest_caps |= not_mask;
795 		spin_lock_irqsave(&gdev->event_spinlock, irqflags);
796 		session->acquired_guest_caps &= ~or_mask;
797 		session->acquired_guest_caps |= not_mask;
798 		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
799 	}
800 
801 	/*
802 	 * If we added a capability, check if that means some other thread in
803 	 * our session should be unblocked because there are events pending
804 	 * (the result of vbg_get_allowed_event_mask_for_session() may change).
805 	 *
806 	 * HACK ALERT! When the seamless support capability is added we generate
807 	 *	a seamless change event so that the ring-3 client can sync with
808 	 *	the seamless state.
809 	 */
810 	if (ret == 0 && or_mask != 0) {
811 		spin_lock_irqsave(&gdev->event_spinlock, irqflags);
812 
813 		if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
814 			gdev->pending_events |=
815 				VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
816 
817 		if (gdev->pending_events)
818 			wakeup = true;
819 
820 		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
821 
822 		if (wakeup)
823 			wake_up(&gdev->event_wq);
824 	}
825 
826 out:
827 	mutex_unlock(&gdev->session_mutex);
828 
829 	return ret;
830 }
831 
832 /**
833  * vbg_set_session_capabilities - Sets the guest capabilities for a
834  *	session. Takes the session mutex.
835  * @gdev:			The Guest extension device.
836  * @session:			The session.
837  * @or_mask:			The capabilities to add.
838  * @not_mask:			The capabilities to remove.
839  * @session_termination:	Set if we're called by the session cleanup code.
840  *				This tweaks the error handling so we perform
841  *				proper session cleanup even if the host
842  *				misbehaves.
843  *
844  * Return: %0 or negative errno value.
845  */
846 static int vbg_set_session_capabilities(struct vbg_dev *gdev,
847 					struct vbg_session *session,
848 					u32 or_mask, u32 not_mask,
849 					bool session_termination)
850 {
851 	u32 changed, previous;
852 	int ret = 0;
853 
854 	mutex_lock(&gdev->session_mutex);
855 
856 	if (gdev->acquire_mode_guest_caps & or_mask) {
857 		vbg_err("%s error: cannot set caps which are in acquire_mode\n",
858 			__func__);
859 		ret = -EBUSY;
860 		goto out;
861 	}
862 
863 	/* Apply the changes to the session mask. */
864 	previous = session->set_guest_caps;
865 	session->set_guest_caps |= or_mask;
866 	session->set_guest_caps &= ~not_mask;
867 
868 	/* If anything actually changed, update the global usage counters. */
869 	changed = previous ^ session->set_guest_caps;
870 	if (!changed)
871 		goto out;
872 
873 	vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);
874 
875 	ret = vbg_set_host_capabilities(gdev, session, session_termination);
876 	/* Roll back on failure, unless it's session termination time. */
877 	if (ret < 0 && !session_termination) {
878 		vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
879 				    session->set_guest_caps);
880 		session->set_guest_caps = previous;
881 	}
882 
883 out:
884 	mutex_unlock(&gdev->session_mutex);
885 
886 	return ret;
887 }
888 
889 /**
890  * vbg_query_host_version - get the host feature mask and version information.
891  * @gdev:		The Guest extension device.
892  *
893  * Return: %0 or negative errno value.
894  */
895 static int vbg_query_host_version(struct vbg_dev *gdev)
896 {
897 	struct vmmdev_host_version *req;
898 	int rc, ret;
899 
900 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
901 			    VBG_KERNEL_REQUEST);
902 	if (!req)
903 		return -ENOMEM;
904 
905 	rc = vbg_req_perform(gdev, req);
906 	ret = vbg_status_code_to_errno(rc);
907 	if (ret) {
908 		vbg_err("%s error: %d\n", __func__, rc);
909 		goto out;
910 	}
911 
912 	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
913 		 req->major, req->minor, req->build, req->revision);
914 	gdev->host_features = req->features;
915 
916 	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
917 		 gdev->host_features);
918 
919 	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
920 		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
921 		ret = -ENODEV;
922 	}
923 
924 out:
925 	vbg_req_free(req, sizeof(*req));
926 	return ret;
927 }
928 
929 /**
930  * vbg_core_init - Initializes the VBoxGuest device extension when the
931  *	device driver is loaded.
932  * @gdev:		The Guest extension device.
933  * @fixed_events:	Events that will be enabled upon init and no client
934  *			will ever be allowed to mask.
935  *
936  * The native code locates the VMMDev on the PCI bus and retrieves
937  * the MMIO and I/O port ranges; this function will take care of
938  * mapping the MMIO memory (if present). Upon successful return
939  * the native code should set up the interrupt handler.
940  *
941  * Return: %0 or negative errno value.
942  */
943 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
944 {
945 	int ret = -ENOMEM;
946 
947 	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
948 	gdev->event_filter_host = U32_MAX;	/* forces a report */
949 	gdev->guest_caps_host = U32_MAX;	/* forces a report */
950 
951 	init_waitqueue_head(&gdev->event_wq);
952 	init_waitqueue_head(&gdev->hgcm_wq);
953 	spin_lock_init(&gdev->event_spinlock);
954 	mutex_init(&gdev->session_mutex);
955 	mutex_init(&gdev->cancel_req_mutex);
956 	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
957 	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
958 
959 	gdev->mem_balloon.get_req =
960 		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
961 			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
962 			      VBG_KERNEL_REQUEST);
963 	gdev->mem_balloon.change_req =
964 		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
965 			      VMMDEVREQ_CHANGE_MEMBALLOON,
966 			      VBG_KERNEL_REQUEST);
967 	gdev->cancel_req =
968 		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
969 			      VMMDEVREQ_HGCM_CANCEL2,
970 			      VBG_KERNEL_REQUEST);
971 	gdev->ack_events_req =
972 		vbg_req_alloc(sizeof(*gdev->ack_events_req),
973 			      VMMDEVREQ_ACKNOWLEDGE_EVENTS,
974 			      VBG_KERNEL_REQUEST);
975 	gdev->mouse_status_req =
976 		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
977 			      VMMDEVREQ_GET_MOUSE_STATUS,
978 			      VBG_KERNEL_REQUEST);
979 
980 	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
981 	    !gdev->cancel_req || !gdev->ack_events_req ||
982 	    !gdev->mouse_status_req)
983 		goto err_free_reqs;
984 
985 	ret = vbg_query_host_version(gdev);
986 	if (ret)
987 		goto err_free_reqs;
988 
989 	ret = vbg_report_guest_info(gdev);
990 	if (ret) {
991 		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
992 		goto err_free_reqs;
993 	}
994 
995 	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
996 	if (ret) {
997 		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
998 			ret);
999 		goto err_free_reqs;
1000 	}
1001 
1002 	ret = vbg_reset_host_capabilities(gdev);
1003 	if (ret) {
1004 		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
1005 			ret);
1006 		goto err_free_reqs;
1007 	}
1008 
1009 	ret = vbg_core_set_mouse_status(gdev, 0);
1010 	if (ret) {
1011 		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
1012 		goto err_free_reqs;
1013 	}
1014 
1015 	/* These may fail without requiring the driver init to fail. */
1016 	vbg_guest_mappings_init(gdev);
1017 	vbg_heartbeat_init(gdev);
1018 
1019 	/* All Done! */
1020 	ret = vbg_report_driver_status(gdev, true);
1021 	if (ret < 0)
1022 		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
1023 
1024 	return 0;
1025 
1026 err_free_reqs:
1027 	vbg_req_free(gdev->mouse_status_req,
1028 		     sizeof(*gdev->mouse_status_req));
1029 	vbg_req_free(gdev->ack_events_req,
1030 		     sizeof(*gdev->ack_events_req));
1031 	vbg_req_free(gdev->cancel_req,
1032 		     sizeof(*gdev->cancel_req));
1033 	vbg_req_free(gdev->mem_balloon.change_req,
1034 		     sizeof(*gdev->mem_balloon.change_req));
1035 	vbg_req_free(gdev->mem_balloon.get_req,
1036 		     sizeof(*gdev->mem_balloon.get_req));
1037 	return ret;
1038 }
1039 
1040 /**
1041  * vbg_core_exit - Call this on exit to clean-up vboxguest-core managed
1042  *	resources.
1043  * @gdev:		The Guest extension device.
1044  *
1045  * The native code should call this before the driver is unloaded,
1046  * but don't call this on shutdown.
1047  */
1048 void vbg_core_exit(struct vbg_dev *gdev)
1049 {
1050 	vbg_heartbeat_exit(gdev);
1051 	vbg_guest_mappings_exit(gdev);
1052 
1053 	/* Clear the host flags (mouse status etc). */
1054 	vbg_reset_host_event_filter(gdev, 0);
1055 	vbg_reset_host_capabilities(gdev);
1056 	vbg_core_set_mouse_status(gdev, 0);
1057 
1058 	vbg_req_free(gdev->mouse_status_req,
1059 		     sizeof(*gdev->mouse_status_req));
1060 	vbg_req_free(gdev->ack_events_req,
1061 		     sizeof(*gdev->ack_events_req));
1062 	vbg_req_free(gdev->cancel_req,
1063 		     sizeof(*gdev->cancel_req));
1064 	vbg_req_free(gdev->mem_balloon.change_req,
1065 		     sizeof(*gdev->mem_balloon.change_req));
1066 	vbg_req_free(gdev->mem_balloon.get_req,
1067 		     sizeof(*gdev->mem_balloon.get_req));
1068 }
1069 
1070 /**
1071  * vbg_core_open_session - Creates a VBoxGuest user session.
1072  * @gdev:		The Guest extension device.
1073  * @requestor:		VMMDEV_REQUESTOR_* flags
1074  *
1075  * vboxguest_linux.c calls this when userspace opens the char-device.
1076  *
1077  * Return: A pointer to the new session or an ERR_PTR on error.
1078  */
1079 struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
1080 {
1081 	struct vbg_session *session;
1082 
1083 	session = kzalloc_obj(*session, GFP_KERNEL);
1084 	if (!session)
1085 		return ERR_PTR(-ENOMEM);
1086 
1087 	session->gdev = gdev;
1088 	session->requestor = requestor;
1089 
1090 	return session;
1091 }
1092 
1093 /**
1094  * vbg_core_close_session - Closes a VBoxGuest session.
1095  * @session:		The session to close (and free).
1096  */
1097 void vbg_core_close_session(struct vbg_session *session)
1098 {
1099 	struct vbg_dev *gdev = session->gdev;
1100 	int i, rc;
1101 
1102 	vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true);
1103 	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
1104 	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
1105 
1106 	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1107 		if (!session->hgcm_client_ids[i])
1108 			continue;
1109 
1110 		/* requestor is kernel here, as we're cleaning up. */
1111 		vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
1112 				    session->hgcm_client_ids[i], &rc);
1113 	}
1114 
1115 	kfree(session);
1116 }
1117 
1118 static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
1119 			 size_t out_size)
1120 {
1121 	if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
1122 	    hdr->size_out != (sizeof(*hdr) + out_size))
1123 		return -EINVAL;
1124 
1125 	return 0;
1126 }
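/*
 * Example: an ioctl with an input-only payload passes 0 for out_size, as
 * vbg_ioctl_change_filter_mask() does:
 *
 *	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
 *		return -EINVAL;
 *
 * so userspace must declare exactly sizeof(hdr) + payload in each
 * direction or the request is rejected up front.
 */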
1127 
1128 static int vbg_ioctl_driver_version_info(
1129 	struct vbg_ioctl_driver_version_info *info)
1130 {
1131 	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
1132 	u16 min_maj_version, req_maj_version;
1133 
1134 	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
1135 		return -EINVAL;
1136 
1137 	req_maj_version = info->u.in.req_version >> 16;
1138 	min_maj_version = info->u.in.min_version >> 16;
1139 
1140 	if (info->u.in.min_version > info->u.in.req_version ||
1141 	    min_maj_version != req_maj_version)
1142 		return -EINVAL;
1143 
1144 	if (info->u.in.min_version <= VBG_IOC_VERSION &&
1145 	    min_maj_version == vbg_maj_version) {
1146 		info->u.out.session_version = VBG_IOC_VERSION;
1147 	} else {
1148 		info->u.out.session_version = U32_MAX;
1149 		info->hdr.rc = VERR_VERSION_MISMATCH;
1150 	}
1151 
1152 	info->u.out.driver_version  = VBG_IOC_VERSION;
1153 	info->u.out.driver_revision = 0;
1154 	info->u.out.reserved1      = 0;
1155 	info->u.out.reserved2      = 0;
1156 
1157 	return 0;
1158 }
1159 
1160 /* Must be called with the event_spinlock held */
1161 static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev,
1162 						  struct vbg_session *session)
1163 {
1164 	u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
1165 	u32 session_acquired_caps = session->acquired_guest_caps;
1166 	u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;
1167 
1168 	if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) &&
1169 	    !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
1170 		allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
1171 
1172 	if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) &&
1173 	    !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
1174 		allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
1175 
1176 	return allowed_events;
1177 }
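/*
 * Example: once VMMDEV_GUEST_SUPPORTS_GRAPHICS has been put into acquire
 * mode, only the session that actually acquired that capability keeps
 * VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST in its allowed mask; all other
 * sessions stop seeing display-change events, which is exactly the
 * exclusivity that acquire mode is meant to provide.
 */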
1178 
1179 static bool vbg_wait_event_cond(struct vbg_dev *gdev,
1180 				struct vbg_session *session,
1181 				u32 event_mask)
1182 {
1183 	unsigned long flags;
1184 	bool wakeup;
1185 	u32 events;
1186 
1187 	spin_lock_irqsave(&gdev->event_spinlock, flags);
1188 
1189 	events = gdev->pending_events & event_mask;
1190 	events &= vbg_get_allowed_event_mask_for_session(gdev, session);
1191 	wakeup = events || session->cancel_waiters;
1192 
1193 	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1194 
1195 	return wakeup;
1196 }
1197 
1198 /* Must be called with the event_spinlock held */
1199 static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
1200 				     struct vbg_session *session,
1201 				     u32 event_mask)
1202 {
1203 	u32 events = gdev->pending_events & event_mask;
1204 
1205 	events &= vbg_get_allowed_event_mask_for_session(gdev, session);
1206 	gdev->pending_events &= ~events;
1207 	return events;
1208 }
1209 
1210 static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
1211 				     struct vbg_session *session,
1212 				     struct vbg_ioctl_wait_for_events *wait)
1213 {
1214 	u32 timeout_ms = wait->u.in.timeout_ms;
1215 	u32 event_mask = wait->u.in.events;
1216 	unsigned long flags;
1217 	long timeout;
1218 	int ret = 0;
1219 
1220 	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
1221 		return -EINVAL;
1222 
1223 	if (timeout_ms == U32_MAX)
1224 		timeout = MAX_SCHEDULE_TIMEOUT;
1225 	else
1226 		timeout = msecs_to_jiffies(timeout_ms);
1227 
1228 	wait->u.out.events = 0;
1229 	do {
1230 		timeout = wait_event_interruptible_timeout(
1231 				gdev->event_wq,
1232 				vbg_wait_event_cond(gdev, session, event_mask),
1233 				timeout);
1234 
1235 		spin_lock_irqsave(&gdev->event_spinlock, flags);
1236 
1237 		if (timeout < 0 || session->cancel_waiters) {
1238 			ret = -EINTR;
1239 		} else if (timeout == 0) {
1240 			ret = -ETIMEDOUT;
1241 		} else {
1242 			wait->u.out.events =
1243 			   vbg_consume_events_locked(gdev, session, event_mask);
1244 		}
1245 
1246 		spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1247 
1248 		/*
1249 		 * Someone else may have consumed the event(s) first, in
1250 		 * which case we go back to waiting.
1251 		 */
1252 	} while (ret == 0 && wait->u.out.events == 0);
1253 
1254 	return ret;
1255 }
1256 
1257 static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
1258 					       struct vbg_session *session,
1259 					       struct vbg_ioctl_hdr *hdr)
1260 {
1261 	unsigned long flags;
1262 
1263 	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
1264 		return -EINVAL;
1265 
1266 	spin_lock_irqsave(&gdev->event_spinlock, flags);
1267 	session->cancel_waiters = true;
1268 	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1269 
1270 	wake_up(&gdev->event_wq);
1271 
1272 	return 0;
1273 }
1274 
1275 /**
1276  * vbg_req_allowed - Checks if the VMM request is allowed in the
1277  *	context of the given session.
1278  * @gdev:		The Guest extension device.
1279  * @session:		The calling session.
1280  * @req:		The request.
1281  *
1282  * Return: %0 or negative errno value.
1283  */
1284 static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
1285 			   const struct vmmdev_request_header *req)
1286 {
1287 	const struct vmmdev_guest_status *guest_status;
1288 	bool trusted_apps_only;
1289 
1290 	switch (req->request_type) {
1291 	/* Trusted users apps only. */
1292 	case VMMDEVREQ_QUERY_CREDENTIALS:
1293 	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
1294 	case VMMDEVREQ_REGISTER_SHARED_MODULE:
1295 	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
1296 	case VMMDEVREQ_WRITE_COREDUMP:
1297 	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
1298 	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
1299 	case VMMDEVREQ_CHECK_SHARED_MODULES:
1300 	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
1301 	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
1302 	case VMMDEVREQ_REPORT_GUEST_STATS:
1303 	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
1304 	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
1305 		trusted_apps_only = true;
1306 		break;
1307 
1308 	/* Anyone. */
1309 	case VMMDEVREQ_GET_MOUSE_STATUS:
1310 	case VMMDEVREQ_SET_MOUSE_STATUS:
1311 	case VMMDEVREQ_SET_POINTER_SHAPE:
1312 	case VMMDEVREQ_GET_HOST_VERSION:
1313 	case VMMDEVREQ_IDLE:
1314 	case VMMDEVREQ_GET_HOST_TIME:
1315 	case VMMDEVREQ_SET_POWER_STATUS:
1316 	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
1317 	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
1318 	case VMMDEVREQ_REPORT_GUEST_STATUS:
1319 	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
1320 	case VMMDEVREQ_VIDEMODE_SUPPORTED:
1321 	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
1322 	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
1323 	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
1324 	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
1325 	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
1326 	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
1327 	case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS:
1328 	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
1329 	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI:
1330 	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
1331 	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
1332 	case VMMDEVREQ_LOG_STRING:
1333 	case VMMDEVREQ_GET_SESSION_ID:
1334 		trusted_apps_only = false;
1335 		break;
1336 
1337 	/* Depends on the request parameters... */
1338 	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
1339 		guest_status = (const struct vmmdev_guest_status *)req;
1340 		switch (guest_status->facility) {
1341 		case VBOXGUEST_FACILITY_TYPE_ALL:
1342 		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
1343 			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
1344 				guest_status->facility);
1345 			return -EPERM;
1346 		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
1347 			trusted_apps_only = true;
1348 			break;
1349 		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
1350 		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
1351 		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
1352 		default:
1353 			trusted_apps_only = false;
1354 			break;
1355 		}
1356 		break;
1357 
1358 	/* Anything else is not allowed. */
1359 	default:
1360 		vbg_err("Denying userspace vmm call type %#08x\n",
1361 			req->request_type);
1362 		return -EPERM;
1363 	}
1364 
1365 	if (trusted_apps_only &&
1366 	    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
1367 		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
1368 			req->request_type);
1369 		return -EPERM;
1370 	}
1371 
1372 	return 0;
1373 }
1374 
1375 static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
1376 				struct vbg_session *session, void *data)
1377 {
1378 	struct vbg_ioctl_hdr *hdr = data;
1379 	int ret;
1380 
1381 	if (hdr->size_in != hdr->size_out)
1382 		return -EINVAL;
1383 
1384 	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
1385 		return -E2BIG;
1386 
1387 	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
1388 		return -EINVAL;
1389 
1390 	ret = vbg_req_allowed(gdev, session, data);
1391 	if (ret < 0)
1392 		return ret;
1393 
1394 	vbg_req_perform(gdev, data);
1395 	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
1396 
1397 	return 0;
1398 }
1399 
1400 static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
1401 				  struct vbg_session *session,
1402 				  struct vbg_ioctl_hgcm_connect *conn)
1403 {
1404 	u32 client_id;
1405 	int i, ret;
1406 
1407 	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
1408 		return -EINVAL;
1409 
1410 	/* Find a free place in the session's hgcm_client_ids array and claim it */
1411 	mutex_lock(&gdev->session_mutex);
1412 	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1413 		if (!session->hgcm_client_ids[i]) {
1414 			session->hgcm_client_ids[i] = U32_MAX;
1415 			break;
1416 		}
1417 	}
1418 	mutex_unlock(&gdev->session_mutex);
1419 
1420 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1421 		return -EMFILE;
1422 
1423 	ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
1424 			       &client_id, &conn->hdr.rc);
1425 
1426 	mutex_lock(&gdev->session_mutex);
1427 	if (ret == 0 && conn->hdr.rc >= 0) {
1428 		conn->u.out.client_id = client_id;
1429 		session->hgcm_client_ids[i] = client_id;
1430 	} else {
1431 		conn->u.out.client_id = 0;
1432 		session->hgcm_client_ids[i] = 0;
1433 	}
1434 	mutex_unlock(&gdev->session_mutex);
1435 
1436 	return ret;
1437 }
1438 
1439 static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1440 				     struct vbg_session *session,
1441 				     struct vbg_ioctl_hgcm_disconnect *disconn)
1442 {
1443 	u32 client_id;
1444 	int i, ret;
1445 
1446 	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
1447 		return -EINVAL;
1448 
1449 	client_id = disconn->u.in.client_id;
1450 	if (client_id == 0 || client_id == U32_MAX)
1451 		return -EINVAL;
1452 
1453 	mutex_lock(&gdev->session_mutex);
1454 	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1455 		if (session->hgcm_client_ids[i] == client_id) {
1456 			session->hgcm_client_ids[i] = U32_MAX;
1457 			break;
1458 		}
1459 	}
1460 	mutex_unlock(&gdev->session_mutex);
1461 
1462 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1463 		return -EINVAL;
1464 
1465 	ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
1466 				  &disconn->hdr.rc);
1467 
1468 	mutex_lock(&gdev->session_mutex);
1469 	if (ret == 0 && disconn->hdr.rc >= 0)
1470 		session->hgcm_client_ids[i] = 0;
1471 	else
1472 		session->hgcm_client_ids[i] = client_id;
1473 	mutex_unlock(&gdev->session_mutex);
1474 
1475 	return ret;
1476 }
1477 
1478 static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
1479 {
1480 	switch (type) {
1481 	case VMMDEV_HGCM_PARM_TYPE_32BIT:
1482 	case VMMDEV_HGCM_PARM_TYPE_64BIT:
1483 	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
1484 	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
1485 	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
1486 		return true;
1487 	default:
1488 		return false;
1489 	}
1490 }
1491 
1492 static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1493 			       struct vbg_session *session, bool f32bit,
1494 			       struct vbg_ioctl_hgcm_call *call)
1495 {
1496 	size_t actual_size;
1497 	u32 client_id;
1498 	int i, ret;
1499 
1500 	if (call->hdr.size_in < sizeof(*call))
1501 		return -EINVAL;
1502 
1503 	if (call->hdr.size_in != call->hdr.size_out)
1504 		return -EINVAL;
1505 
1506 	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
1507 		return -E2BIG;
1508 
1509 	client_id = call->client_id;
1510 	if (client_id == 0 || client_id == U32_MAX)
1511 		return -EINVAL;
1512 
1513 	actual_size = sizeof(*call);
1514 	if (f32bit)
1515 		actual_size += call->parm_count *
1516 			       sizeof(struct vmmdev_hgcm_function_parameter32);
1517 	else
1518 		actual_size += call->parm_count *
1519 			       sizeof(struct vmmdev_hgcm_function_parameter);
1520 	if (call->hdr.size_in < actual_size) {
1521 		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
1522 			  call->hdr.size_in, actual_size);
1523 		return -EINVAL;
1524 	}
1525 	call->hdr.size_out = actual_size;
1526 
1527 	/* Validate parameter types */
1528 	if (f32bit) {
1529 		struct vmmdev_hgcm_function_parameter32 *parm =
1530 			VBG_IOCTL_HGCM_CALL_PARMS32(call);
1531 
1532 		for (i = 0; i < call->parm_count; i++)
1533 			if (!vbg_param_valid(parm[i].type))
1534 				return -EINVAL;
1535 	} else {
1536 		struct vmmdev_hgcm_function_parameter *parm =
1537 			VBG_IOCTL_HGCM_CALL_PARMS(call);
1538 
1539 		for (i = 0; i < call->parm_count; i++)
1540 			if (!vbg_param_valid(parm[i].type))
1541 				return -EINVAL;
1542 	}
1543 
1544 	/*
1545 	 * Validate the client id.
1546 	 */
1547 	mutex_lock(&gdev->session_mutex);
1548 	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
1549 		if (session->hgcm_client_ids[i] == client_id)
1550 			break;
1551 	mutex_unlock(&gdev->session_mutex);
1552 	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
1553 		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
1554 			  client_id);
1555 		return -EINVAL;
1556 	}
1557 
1558 	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
1559 		ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
1560 				      call->function, call->timeout_ms,
1561 				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
1562 				      call->parm_count, &call->hdr.rc);
1563 	else
1564 		ret = vbg_hgcm_call(gdev, session->requestor, client_id,
1565 				    call->function, call->timeout_ms,
1566 				    VBG_IOCTL_HGCM_CALL_PARMS(call),
1567 				    call->parm_count, &call->hdr.rc);
1568 
1569 	if (ret == -E2BIG) {
1570 		/* E2BIG needs to be reported through the hdr.rc field. */
1571 		call->hdr.rc = VERR_OUT_OF_RANGE;
1572 		ret = 0;
1573 	}
1574 
1575 	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
1576 		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
1577 
1578 	return ret;
1579 }
1580 
1581 static int vbg_ioctl_log(struct vbg_ioctl_log *log)
1582 {
1583 	if (log->hdr.size_out != sizeof(log->hdr))
1584 		return -EINVAL;
1585 
1586 	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
1587 		 log->u.in.msg);
1588 
1589 	return 0;
1590 }
1591 
1592 static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
1593 					struct vbg_session *session,
1594 					struct vbg_ioctl_change_filter *filter)
1595 {
1596 	u32 or_mask, not_mask;
1597 
1598 	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
1599 		return -EINVAL;
1600 
1601 	or_mask = filter->u.in.or_mask;
1602 	not_mask = filter->u.in.not_mask;
1603 
1604 	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1605 		return -EINVAL;
1606 
1607 	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
1608 					    false);
1609 }
1610 
1611 static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev,
1612 	     struct vbg_session *session,
1613 	     struct vbg_ioctl_acquire_guest_caps *caps)
1614 {
1615 	u32 flags, or_mask, not_mask;
1616 
1617 	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0))
1618 		return -EINVAL;
1619 
1620 	flags = caps->u.in.flags;
1621 	or_mask = caps->u.in.or_mask;
1622 	not_mask = caps->u.in.not_mask;
1623 
1624 	if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK)
1625 		return -EINVAL;
1626 
1627 	if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
1628 		return -EINVAL;
1629 
1630 	return vbg_acquire_session_capabilities(gdev, session, or_mask,
1631 						not_mask, flags, false);
1632 }
1633 
1634 static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
1635 	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
1636 {
1637 	u32 or_mask, not_mask;
1638 	int ret;
1639 
1640 	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
1641 		return -EINVAL;
1642 
1643 	or_mask = caps->u.in.or_mask;
1644 	not_mask = caps->u.in.not_mask;
1645 
1646 	if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
1647 		return -EINVAL;
1648 
1649 	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
1650 					   false);
1651 	if (ret)
1652 		return ret;
1653 
1654 	caps->u.out.session_caps = session->set_guest_caps;
1655 	caps->u.out.global_caps = gdev->guest_caps_host;
1656 
1657 	return 0;
1658 }
1659 
1660 static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
1661 				   struct vbg_ioctl_check_balloon *balloon_info)
1662 {
1663 	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
1664 		return -EINVAL;
1665 
1666 	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
1667 	/*
1668 	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
1669 	 * events entirely in the kernel, see vbg_core_isr().
1670 	 */
1671 	balloon_info->u.out.handle_in_r3 = false;
1672 
1673 	return 0;
1674 }
1675 
1676 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1677 				     struct vbg_session *session,
1678 				     struct vbg_ioctl_write_coredump *dump)
1679 {
1680 	struct vmmdev_write_core_dump *req;
1681 
1682 	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
1683 		return -EINVAL;
1684 
1685 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
1686 			    session->requestor);
1687 	if (!req)
1688 		return -ENOMEM;
1689 
1690 	req->flags = dump->u.in.flags;
1691 	dump->hdr.rc = vbg_req_perform(gdev, req);
1692 
1693 	vbg_req_free(req, sizeof(*req));
1694 	return 0;
1695 }
1696 
1697 /**
1698  * vbg_core_ioctl - Common IOCtl for user to kernel communication.
1699  * @session:	The client session.
1700  * @req:	The requested function.
1701  * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
1702  *
1703  * Return: %0 or negative errno value.
1704  */
1705 int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1706 {
1707 	unsigned int req_no_size = req & ~IOCSIZE_MASK;
1708 	struct vbg_dev *gdev = session->gdev;
1709 	struct vbg_ioctl_hdr *hdr = data;
1710 	bool f32bit = false;
1711 
1712 	hdr->rc = VINF_SUCCESS;
1713 	if (!hdr->size_out)
1714 		hdr->size_out = hdr->size_in;
1715 
1716 	/*
1717 	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
1718 	 * already checked by vbg_misc_device_ioctl().
1719 	 */
1720 
1721 	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
1722 	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
1723 	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
1724 	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
1725 		return vbg_ioctl_vmmrequest(gdev, session, data);
1726 
1727 	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
1728 		return -EINVAL;
1729 
1730 	/* Fixed size requests. */
1731 	switch (req) {
1732 	case VBG_IOCTL_DRIVER_VERSION_INFO:
1733 		return vbg_ioctl_driver_version_info(data);
1734 	case VBG_IOCTL_HGCM_CONNECT:
1735 		return vbg_ioctl_hgcm_connect(gdev, session, data);
1736 	case VBG_IOCTL_HGCM_DISCONNECT:
1737 		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
1738 	case VBG_IOCTL_WAIT_FOR_EVENTS:
1739 		return vbg_ioctl_wait_for_events(gdev, session, data);
1740 	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
1741 		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
1742 	case VBG_IOCTL_CHANGE_FILTER_MASK:
1743 		return vbg_ioctl_change_filter_mask(gdev, session, data);
1744 	case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
1745 		return vbg_ioctl_acquire_guest_capabilities(gdev, session, data);
1746 	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
1747 		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
1748 	case VBG_IOCTL_CHECK_BALLOON:
1749 		return vbg_ioctl_check_balloon(gdev, data);
1750 	case VBG_IOCTL_WRITE_CORE_DUMP:
1751 		return vbg_ioctl_write_core_dump(gdev, session, data);
1752 	}
1753 
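	/*
	 * _IOC() encodes the payload size into the ioctl number, and
	 * req_no_size = req & ~IOCSIZE_MASK strips it again, so e.g.
	 * VBG_IOCTL_HGCM_CALL(0) matches an HGCM call ioctl with any
	 * size userspace encoded for its parameter array.
	 */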
1754 	/* Variable sized requests. */
1755 	switch (req_no_size) {
1756 #ifdef CONFIG_COMPAT
1757 	case VBG_IOCTL_HGCM_CALL_32(0):
1758 		f32bit = true;
1759 		fallthrough;
1760 #endif
1761 	case VBG_IOCTL_HGCM_CALL(0):
1762 		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
1763 	case VBG_IOCTL_LOG(0):
1764 	case VBG_IOCTL_LOG_ALT(0):
1765 		return vbg_ioctl_log(data);
1766 	}
1767 
1768 	vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
1769 	return -ENOTTY;
1770 }
1771 
1772 /**
1773  * vbg_core_set_mouse_status - Report guest-supported mouse features to the host.
1774  *
1775  * @gdev:		The Guest extension device.
1776  * @features:		The set of features to report to the host.
1777  *
1778  * Return: %0 or negative errno value.
1779  */
1780 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1781 {
1782 	struct vmmdev_mouse_status *req;
1783 	int rc;
1784 
1785 	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
1786 			    VBG_KERNEL_REQUEST);
1787 	if (!req)
1788 		return -ENOMEM;
1789 
1790 	req->mouse_features = features;
1791 	req->pointer_pos_x = 0;
1792 	req->pointer_pos_y = 0;
1793 
1794 	rc = vbg_req_perform(gdev, req);
1795 	if (rc < 0)
1796 		vbg_err("%s error, rc: %d\n", __func__, rc);
1797 
1798 	vbg_req_free(req, sizeof(*req));
1799 	return vbg_status_code_to_errno(rc);
1800 }
1801 
1802 /* Core interrupt service routine. */
1803 irqreturn_t vbg_core_isr(int irq, void *dev_id)
1804 {
1805 	struct vbg_dev *gdev = dev_id;
1806 	struct vmmdev_events *req = gdev->ack_events_req;
1807 	bool mouse_position_changed = false;
1808 	unsigned long flags;
1809 	u32 events = 0;
1810 	int rc;
1811 
1812 	if (!gdev->mmio->V.V1_04.have_events)
1813 		return IRQ_NONE;
1814 
1815 	/* Get and acknowledge events. */
1816 	req->header.rc = VERR_INTERNAL_ERROR;
1817 	req->events = 0;
1818 	rc = vbg_req_perform(gdev, req);
1819 	if (rc < 0) {
1820 		vbg_err("Error performing events req, rc: %d\n", rc);
1821 		return IRQ_NONE;
1822 	}
1823 
1824 	events = req->events;
1825 
1826 	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
1827 		mouse_position_changed = true;
1828 		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1829 	}
1830 
1831 	if (events & VMMDEV_EVENT_HGCM) {
1832 		wake_up(&gdev->hgcm_wq);
1833 		events &= ~VMMDEV_EVENT_HGCM;
1834 	}
1835 
1836 	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
1837 		schedule_work(&gdev->mem_balloon.work);
1838 		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1839 	}
1840 
1841 	if (events) {
1842 		spin_lock_irqsave(&gdev->event_spinlock, flags);
1843 		gdev->pending_events |= events;
1844 		spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1845 
1846 		wake_up(&gdev->event_wq);
1847 	}
1848 
1849 	if (mouse_position_changed)
1850 		vbg_linux_mouse_event(gdev);
1851 
1852 	return IRQ_HANDLED;
1853 }
1854