/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024-2025 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory (Department of Computer Science and Technology) under Innovate
 * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
 * Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <machine/sbi.h>

#include "riscv.h"
#include "vmm_fence.h"

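/*
 * Handle a call to the SBI remote fence extension (RFNC): build a
 * vmm_fence request from the guest's arguments, select the target vCPUs
 * from hart_mask/hart_mask_base, and queue the fence for execution.
 */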
static int
vmm_sbi_handle_rfnc(struct vcpu *vcpu, struct hypctx *hypctx)
{
	struct vmm_fence fence;
	cpuset_t active_cpus;
	uint64_t hart_mask;
	uint64_t hart_mask_base;
	uint64_t func_id;
	struct hyp *hyp;
	uint16_t maxcpus;
	cpuset_t cpus;
	int i;

	func_id = hypctx->guest_regs.hyp_a[6];
	hart_mask = hypctx->guest_regs.hyp_a[0];
	hart_mask_base = hypctx->guest_regs.hyp_a[1];

	/* Construct the vmm_fence request from the SBI call arguments. */
	fence.start = hypctx->guest_regs.hyp_a[2];
	fence.size = hypctx->guest_regs.hyp_a[3];
	fence.asid = hypctx->guest_regs.hyp_a[4];

	switch (func_id) {
	case SBI_RFNC_REMOTE_FENCE_I:
		fence.type = VMM_RISCV_FENCE_I;
		break;
	case SBI_RFNC_REMOTE_SFENCE_VMA:
		fence.type = VMM_RISCV_FENCE_VMA;
		break;
	case SBI_RFNC_REMOTE_SFENCE_VMA_ASID:
		fence.type = VMM_RISCV_FENCE_VMA_ASID;
		break;
	default:
		return (SBI_ERR_NOT_SUPPORTED);
	}

	/* Construct cpuset_t from the mask supplied. */
	CPU_ZERO(&cpus);
	hyp = hypctx->hyp;
	active_cpus = vm_active_cpus(hyp->vm);
	maxcpus = vm_get_maxcpus(hyp->vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = vm_vcpu(hyp->vm, i);
		if (vcpu == NULL)
			continue;
		if (hart_mask_base != -1UL) {
			if (i < hart_mask_base)
				continue;
			if (!(hart_mask & (1UL << (i - hart_mask_base))))
				continue;
		}
		/*
		 * If hart_mask_base or any hart ID selected by hart_mask
		 * does not refer to an active vCPU, return an error.
		 */
		if (!CPU_ISSET(i, &active_cpus))
			return (SBI_ERR_INVALID_PARAM);
		CPU_SET(i, &cpus);
	}

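	/* At least one valid target hart must have been selected. */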
	if (CPU_EMPTY(&cpus))
		return (SBI_ERR_INVALID_PARAM);

	vmm_fence_add(hyp->vm, &cpus, &fence);

	return (SBI_SUCCESS);
}

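/*
 * Handle a call to the SBI time extension (TIME): program the guest's
 * virtual timer with the requested next event value.
 */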
static int
vmm_sbi_handle_time(struct vcpu *vcpu, struct hypctx *hypctx)
{
	uint64_t func_id;
	uint64_t next_val;

	func_id = hypctx->guest_regs.hyp_a[6];
	next_val = hypctx->guest_regs.hyp_a[0];

	switch (func_id) {
	case SBI_TIME_SET_TIMER:
		vtimer_set_timer(hypctx, next_val);
		break;
	default:
		return (SBI_ERR_NOT_SUPPORTED);
	}

	return (SBI_SUCCESS);
}

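/*
 * Handle a call to the SBI IPI extension: convert the guest-supplied hart
 * mask into a cpuset of active vCPUs and send an IPI to each of them.
 */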
static int
vmm_sbi_handle_ipi(struct vcpu *vcpu, struct hypctx *hypctx)
{
	cpuset_t active_cpus;
	struct hyp *hyp;
	uint64_t hart_mask;
	uint64_t hart_mask_base;
	uint64_t func_id;
	cpuset_t cpus;
	int hart_id;
	int bit;

	func_id = hypctx->guest_regs.hyp_a[6];
	hart_mask = hypctx->guest_regs.hyp_a[0];
	hart_mask_base = hypctx->guest_regs.hyp_a[1];

	dprintf("%s: hart_mask %lx\n", __func__, hart_mask);

	hyp = hypctx->hyp;

	active_cpus = vm_active_cpus(hyp->vm);

	CPU_ZERO(&cpus);
	switch (func_id) {
	case SBI_IPI_SEND_IPI:
		/* Walk the full 64-bit hart mask. */
		while ((bit = ffsl(hart_mask))) {
			hart_id = (bit - 1);
			hart_mask &= ~(1UL << hart_id);
			if (hart_mask_base != -1UL)
				hart_id += hart_mask_base;
			if (!CPU_ISSET(hart_id, &active_cpus))
				return (SBI_ERR_INVALID_PARAM);
			CPU_SET(hart_id, &cpus);
		}
		break;
	default:
		dprintf("%s: unknown func %ld\n", __func__, func_id);
		return (SBI_ERR_NOT_SUPPORTED);
	}

	if (CPU_EMPTY(&cpus))
		return (SBI_ERR_INVALID_PARAM);

	riscv_send_ipi(hyp, &cpus);

	return (SBI_SUCCESS);
}

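/*
 * Handle an SBI ecall trapped from the guest.  Per the SBI calling
 * convention, a7 holds the extension ID, a6 the function ID, and a0-a5
 * the arguments.  Calls serviced in the kernel return the SBI error code
 * to the guest in a0; anything else is forwarded to userspace.
 */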
bool
vmm_sbi_ecall(struct vcpu *vcpu)
{
	int sbi_extension_id;
	struct hypctx *hypctx;
	int error;

	hypctx = riscv_get_active_vcpu();
	sbi_extension_id = hypctx->guest_regs.hyp_a[7];

	dprintf("%s: args %lx %lx %lx %lx %lx %lx %lx %lx\n", __func__,
	    hypctx->guest_regs.hyp_a[0],
	    hypctx->guest_regs.hyp_a[1],
	    hypctx->guest_regs.hyp_a[2],
	    hypctx->guest_regs.hyp_a[3],
	    hypctx->guest_regs.hyp_a[4],
	    hypctx->guest_regs.hyp_a[5],
	    hypctx->guest_regs.hyp_a[6],
	    hypctx->guest_regs.hyp_a[7]);

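	/* Dispatch to the in-kernel handler for this extension, if any. */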
	switch (sbi_extension_id) {
	case SBI_EXT_ID_RFNC:
		error = vmm_sbi_handle_rfnc(vcpu, hypctx);
		break;
	case SBI_EXT_ID_TIME:
		error = vmm_sbi_handle_time(vcpu, hypctx);
		break;
	case SBI_EXT_ID_IPI:
		error = vmm_sbi_handle_ipi(vcpu, hypctx);
		break;
	default:
		/* Return to handle in userspace. */
		return (false);
	}

	hypctx->guest_regs.hyp_a[0] = error;

	/* Request is handled in kernel mode. */
	return (true);
}