xref: /freebsd/sys/arm64/vmm/vmm_instruction_emul.c (revision 47e073941f4e7ca6e9bde3fa65abbfcfed6bfa2b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>

#include <machine/machdep.h>
#include <machine/vmm.h>
#else
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/_iovec.h>

#include <machine/vmm.h>

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <vmmapi.h>
#endif

#include <machine/vmm_instruction_emul.h>

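/*
 * Emulate a trapped guest memory access described by the decoded
 * instruction in 'vie'.  A guest load reads from the emulated region
 * through 'memread' and deposits the value in the target register; a
 * guest store fetches the source register, masks it down to the access
 * size and hands it to 'memwrite'.  Returns 0 on success or an errno
 * value from the callbacks or the register accessors.
 */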
int
vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging __unused, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
{
	uint64_t val;
	int error;

	if (vie->dir == VM_DIR_READ) {
		error = memread(vcpu, gpa, &val, vie->access_size, memarg);
		if (error)
			goto out;
		error = vm_set_register(vcpu, vie->reg, val);
	} else {
		error = vm_get_register(vcpu, vie->reg, &val);
		if (error)
			goto out;
		/* Mask any unneeded bits from the register */
		if (vie->access_size < 8)
			val &= (1ul << (vie->access_size * 8)) - 1;
		error = memwrite(vcpu, gpa, val, vie->access_size, memarg);
	}

out:
	return (error);
}

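/*
 * Emulate a trapped register access (typically a system register access
 * routed to the hypervisor) described by 'vre'.  Reads obtain the value
 * from the caller-supplied 'regread' callback and store it in the
 * destination register; writes fetch the source register and pass it to
 * 'regwrite'.
 */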
int
vmm_emulate_register(struct vcpu *vcpu, struct vre *vre, reg_read_t regread,
    reg_write_t regwrite, void *regarg)
{
	uint64_t val;
	int error;

	if (vre->dir == VM_DIR_READ) {
		error = regread(vcpu, &val, regarg);
		if (error)
			goto out;
		error = vm_set_register(vcpu, vre->reg, val);
	} else {
		error = vm_get_register(vcpu, vre->reg, &val);
		if (error)
			goto out;
		error = regwrite(vcpu, val, regarg);
	}

out:
	return (error);
}
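
/*
 * Usage sketch (illustrative only, not part of this file's interface):
 * a device model supplies read/write callbacks and lets
 * vmm_emulate_instruction() move data between the faulting register and
 * the device.  The callback and argument names below are hypothetical,
 * and the parameter types are inferred from the call sites above.
 *
 *	static int
 *	dummy_dev_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
 *	    int size, void *arg)
 *	{
 *		*rval = 0;	(the dummy device reads as zero)
 *		return (0);
 *	}
 *
 *	static int
 *	dummy_dev_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval,
 *	    int size, void *arg)
 *	{
 *		return (0);	(writes are silently discarded)
 *	}
 *
 *	error = vmm_emulate_instruction(vcpu, gpa, vie, paging,
 *	    dummy_dev_read, dummy_dev_write, NULL);
 */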