xref: /freebsd/sys/arm64/vmm/vmm_nvhe.c (revision 3750ccefb8629a08890bfbae894dd6bc6a7483b4)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Andrew Turner
5  * Copyright (c) 2024 Arm Ltd
6  *
7  * This work was supported by Innovate UK project 105694, "Digital Security
8  * by Design (DSbD) Technology Platform Prototype".
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
/*
 * Build configuration for the shared vmm_hyp.c body (included below) when
 * compiled for non-VHE (nVHE) operation, where this code runs at EL2 while
 * the host kernel runs at EL1.
 */
/* Hypervisor helpers are private to this translation unit in nVHE mode. */
#define	VMM_STATIC	static
/* Prefix shared vmm_hyp.c functions so nVHE and VHE builds can coexist. */
#define	VMM_HYP_FUNC(func)	vmm_nvhe_ ## func

/*
 * In nVHE mode the guest context is always accessed through the EL1
 * register names, so this predicate is constant true (in VHE builds it
 * presumably distinguishes guest from host context — see vmm_hyp.c).
 */
#define	guest_or_nonvhe(guest)	(true)
/* Access ELx system registers via their alternate (unencoded) MRS names. */
#define	EL1_REG(reg)		MRS_REG_ALT_NAME(reg ## _EL1)
#define	EL0_REG(reg)		MRS_REG_ALT_NAME(reg ## _EL0)

/* Textual inclusion: instantiate the shared hypervisor code with the above. */
#include "vmm_hyp.c"

/*
 * Entry point invoked from the EL2 exception vectors (vmm_hyp_exception.S);
 * forward-declared here as it is called from assembly, not from C.
 */
uint64_t vmm_hyp_enter(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);
43 
/*
 * Handlers for the EL2 address space. Only needed by non-VHE code as in VHE
 * the kernel is in EL2 so pmap will manage the address space.
 */

/*
 * Clean and invalidate the data cache, by VA, for [start, start + len).
 * Used to push EL2 data to the point of coherency. Always returns 0 so the
 * result can be passed back through the hypercall return register.
 */
static int
vmm_dc_civac(uint64_t start, uint64_t len)
{
	size_t line_size, end;
	uint64_t ctr;

	ctr = READ_SPECIALREG(ctr_el0);
	/*
	 * CTR_EL0.DminLine is log2 of the smallest D-cache line size in
	 * 4-byte words, hence the sizeof(int) (== 4) base for the shift.
	 */
	line_size = sizeof(int) << CTR_DLINE_SIZE(ctr);
	end = start + len;
	/* Ensure prior stores are visible before the maintenance ops. */
	dsb(ishst);
	/* Clean and Invalidate the D-cache */
	for (; start < end; start += line_size)
		__asm __volatile("dc	civac, %0" :: "r" (start) : "memory");
	/* Wait for all the cache maintenance operations to complete. */
	dsb(ish);
	return (0);
}
64 
/*
 * Invalidate EL2 stage-1 TLB entries, either all of them
 * (HYP_EL2_TLBI_ALL) or those covering the VA range [start, start + len)
 * (HYP_EL2_TLBI_VA). An unknown type falls through to a full invalidate,
 * the conservative choice. Always returns 0 (hypercall return value).
 */
static int
vmm_el2_tlbi(uint64_t type, uint64_t start, uint64_t len)
{
	uint64_t end, r;

	/* Make prior page-table updates visible before the invalidate. */
	dsb(ishst);
	switch (type) {
	default:
		/* FALLTHROUGH: treat unknown requests as a full invalidate. */
	case HYP_EL2_TLBI_ALL:
		__asm __volatile("tlbi	alle2" ::: "memory");
		break;
	case HYP_EL2_TLBI_VA:
		/*
		 * Convert the bounds to the TLBI VA encoding, then issue one
		 * inner-shareable invalidate per last-level (L3) page.
		 */
		end = TLBI_VA(start + len);
		start = TLBI_VA(start);
		for (r = start; r < end; r += TLBI_VA_L3_INCR) {
			__asm __volatile("tlbi	vae2is, %0" :: "r"(r));
		}
		break;
	}
	/* Wait for the TLB invalidation to complete. */
	dsb(ish);

	return (0);
}
88 
89 uint64_t
90 vmm_hyp_enter(uint64_t handle, uint64_t x1, uint64_t x2, uint64_t x3,
91     uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7)
92 {
93 	switch (handle) {
94 	case HYP_ENTER_GUEST:
95 		return (VMM_HYP_FUNC(enter_guest)((struct hyp *)x1,
96 		    (struct hypctx *)x2));
97 	case HYP_READ_REGISTER:
98 		return (VMM_HYP_FUNC(read_reg)(x1));
99 	case HYP_CLEAN_S2_TLBI:
100 		VMM_HYP_FUNC(clean_s2_tlbi());
101 		return (0);
102 	case HYP_DC_CIVAC:
103 		return (vmm_dc_civac(x1, x2));
104 	case HYP_EL2_TLBI:
105 		return (vmm_el2_tlbi(x1, x2, x3));
106 	case HYP_S2_TLBI_RANGE:
107 		VMM_HYP_FUNC(s2_tlbi_range)(x1, x2, x3, x4);
108 		return (0);
109 	case HYP_S2_TLBI_ALL:
110 		VMM_HYP_FUNC(s2_tlbi_all)(x1);
111 		return (0);
112 	case HYP_CLEANUP:	/* Handled in vmm_hyp_exception.S */
113 	default:
114 		break;
115 	}
116 
117 	return (0);
118 }
119