/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 Arm Ltd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/ifunc.h>

#include "arm64.h"
#include "vmm_handlers.h"

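/*
 * Each vmm_* handler below is an ifunc, resolved once based on whether the
 * kernel runs at EL2 (VHE, in_vhe()): with VHE the vmm_vhe_* implementations
 * (provided elsewhere in the VMM) are selected directly, otherwise the nVHE
 * wrappers in this file forward each operation to the EL2 code through a
 * vmm_call_hyp() call carrying one of the HYP_* operation codes.
 */
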
/* Read an EL2 register */
static uint64_t
vmm_nvhe_read_reg(uint64_t reg)
{
	return (vmm_call_hyp(HYP_READ_REGISTER, reg));
}

DEFINE_IFUNC(, uint64_t, vmm_read_reg, (uint64_t reg))
{
	if (in_vhe())
		return (vmm_vhe_read_reg);
	return (vmm_nvhe_read_reg);
}
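
/*
 * Usage sketch (hypothetical caller; the register selector name is an
 * assumption, not taken from this file):
 *
 *	uint64_t ich_vtr = vmm_read_reg(HYP_REG_ICH_VTR);
 *
 * The read goes through whichever implementation the ifunc resolved to.
 */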

/* Enter the guest */
static uint64_t
vmm_nvhe_enter_guest(struct hyp *hyp, struct hypctx *hypctx)
{
	return (vmm_call_hyp(HYP_ENTER_GUEST, hyp->el2_addr, hypctx->el2_addr));
}

DEFINE_IFUNC(, uint64_t, vmm_enter_guest,
    (struct hyp *hyp, struct hypctx *hypctx))
{
	if (in_vhe())
		return (vmm_vhe_enter_guest);
	return (vmm_nvhe_enter_guest);
}
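
/*
 * The nVHE wrapper passes the EL2-mapped addresses of the hyp and hypctx
 * structures (el2_addr) because the EL2 code runs with its own translation
 * tables rather than the EL1 kernel map. A hypothetical call site (variable
 * name illustrative only):
 *
 *	uint64_t excp = vmm_enter_guest(hyp, hypctx);
 *
 * where interpreting the returned exit code is left to the caller.
 */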

/* Clean the TLB for all guests */
static void
vmm_nvhe_clean_s2_tlbi(void)
{
	vmm_call_hyp(HYP_CLEAN_S2_TLBI);
}

DEFINE_IFUNC(, void, vmm_clean_s2_tlbi, (void))
{
	if (in_vhe())
		return (vmm_vhe_clean_s2_tlbi);
	return (vmm_nvhe_clean_s2_tlbi);
}

/*
 * Switch to a guest vttbr and clean the TLB for a range of guest
 * virtual address space.
 */
static void
vmm_nvhe_s2_tlbi_range(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
    bool final_only)
{
	vmm_call_hyp(HYP_S2_TLBI_RANGE, vttbr, sva, eva, final_only);
}

DEFINE_IFUNC(, void, vmm_s2_tlbi_range,
    (uint64_t vttbr, vm_offset_t sva, vm_offset_t eva, bool final_only))
{
	if (in_vhe())
		return (vmm_vhe_s2_tlbi_range);
	return (vmm_nvhe_s2_tlbi_range);
}
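
/*
 * Note: sva/eva bound the guest address range to invalidate; final_only is
 * presumably a hint that only the final (leaf) level of the stage 2 tables
 * changed, allowing a cheaper last-level TLB invalidation at EL2.
 */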

/*
 * Switch to a guest vttbr and clean the TLB for all the guest
 * virtual address space.
 */
static void
vmm_nvhe_s2_tlbi_all(uint64_t vttbr)
{
	vmm_call_hyp(HYP_S2_TLBI_ALL, vttbr);
}

DEFINE_IFUNC(, void, vmm_s2_tlbi_all, (uint64_t vttbr))
{
	if (in_vhe())
		return (vmm_vhe_s2_tlbi_all);
	return (vmm_nvhe_s2_tlbi_all);
}
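
/*
 * Note: the all-space variant takes only the vttbr; the EL2 code is expected
 * to drop every stage 2 TLB entry belonging to the VM that this vttbr (and
 * the VMID it carries) identifies.
 */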
114