xref: /freebsd/sys/riscv/vmm/vmm_fence.c (revision 17b7a0c595a51eaa7e83f16e99e1555bd13a445b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory (Department of Computer Science and Technology) under Innovate
 * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
 * Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include "riscv.h"
#include "vmm_fence.h"

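/*
 * Pop the oldest pending fence from the vcpu's circular fence queue.
 * Returns false if the queue is empty.
 */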
static bool
vmm_fence_dequeue(struct hypctx *hypctx, struct vmm_fence *new_fence)
{
	struct vmm_fence *queue;
	struct vmm_fence *fence;

	mtx_lock_spin(&hypctx->fence_queue_mtx);
	queue = hypctx->fence_queue;
	fence = &queue[hypctx->fence_queue_head];
	if (fence->type != VMM_RISCV_FENCE_INVALID) {
		*new_fence = *fence;
		fence->type = VMM_RISCV_FENCE_INVALID;
		hypctx->fence_queue_head =
		    (hypctx->fence_queue_head + 1) % VMM_FENCE_QUEUE_SIZE;
	} else {
		mtx_unlock_spin(&hypctx->fence_queue_mtx);
		return (false);
	}
	mtx_unlock_spin(&hypctx->fence_queue_mtx);

	return (true);
}

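/*
 * Append a fence to the vcpu's circular fence queue.
 * Returns false if the queue is full.
 */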
static bool
vmm_fence_enqueue(struct hypctx *hypctx, struct vmm_fence *new_fence)
{
	struct vmm_fence *queue;
	struct vmm_fence *fence;

	mtx_lock_spin(&hypctx->fence_queue_mtx);
	queue = hypctx->fence_queue;
	fence = &queue[hypctx->fence_queue_tail];
	if (fence->type == VMM_RISCV_FENCE_INVALID) {
		*fence = *new_fence;
		hypctx->fence_queue_tail =
		    (hypctx->fence_queue_tail + 1) % VMM_FENCE_QUEUE_SIZE;
	} else {
		mtx_unlock_spin(&hypctx->fence_queue_mtx);
		return (false);
	}
	mtx_unlock_spin(&hypctx->fence_queue_mtx);

	return (true);
}

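/*
 * Execute a single queued fence on the current hart by issuing sfence.vma
 * for the requested VA range, optionally restricted to a single ASID.
 */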
static void
vmm_fence_process_one(struct vmm_fence *fence)
{
	uint64_t va;

	KASSERT(fence->type == VMM_RISCV_FENCE_VMA ||
	    fence->type == VMM_RISCV_FENCE_VMA_ASID,
	    ("%s: wrong fence type %d", __func__, fence->type));

	switch (fence->type) {
	case VMM_RISCV_FENCE_VMA:
		for (va = fence->start; va < fence->start + fence->size;
		    va += PAGE_SIZE)
			sfence_vma_page(va);
		break;
	case VMM_RISCV_FENCE_VMA_ASID:
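		/* A start and size of zero means flush the whole ASID. */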
		if (fence->start == 0 && fence->size == 0)
			sfence_vma_asid(fence->asid);
		else
			for (va = fence->start; va < fence->start + fence->size;
			    va += PAGE_SIZE)
				sfence_vma_asid_page(fence->asid, va);
		break;
	default:
		break;
	}
}

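/*
 * Process all fence requests pending for this vcpu: the coarse-grained
 * FENCE_REQ_I and FENCE_REQ_VMA bits first, then any fine-grained fences
 * queued by vmm_fence_add().  Called before entering the guest.
 */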
void
vmm_fence_process(struct hypctx *hypctx)
{
	struct vmm_fence fence;
	int pending;

	pending = atomic_readandclear_32(&hypctx->fence_req);

	KASSERT((pending & ~(FENCE_REQ_I | FENCE_REQ_VMA)) == 0,
	    ("wrong fence bit mask"));

	if (pending & FENCE_REQ_I)
		fence_i();

	if (pending & FENCE_REQ_VMA)
		sfence_vma();

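	/* Drain the fine-grained fence queue. */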
	while (vmm_fence_dequeue(hypctx, &fence) == true)
		vmm_fence_process_one(&fence);
}

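/*
 * Post a fence request to every vcpu in 'cpus'.  FENCE.I and global
 * sfence.vma requests are recorded as bits in fence_req; ranged and
 * per-ASID requests are placed on the vcpu's fence queue, falling back to
 * a global sfence.vma request if the queue is full.  Vcpus that are
 * currently running are then kicked out of the guest with an IPI so that
 * they process the request on re-entry.
 */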
void
vmm_fence_add(struct vm *vm, cpuset_t *cpus, struct vmm_fence *fence)
{
	struct hypctx *hypctx;
	cpuset_t running_cpus;
	struct vcpu *vcpu;
	uint16_t maxcpus;
	int hostcpu;
	int state;
	bool enq;
	int i;

	CPU_ZERO(&running_cpus);

	maxcpus = vm_get_maxcpus(vm);
	for (i = 0; i < maxcpus; i++) {
		if (!CPU_ISSET(i, cpus))
			continue;
		vcpu = vm_vcpu(vm, i);
		hypctx = vcpu_get_cookie(vcpu);

		enq = false;

		/*
		 * No need to enqueue FENCE.I and global VMA fences;
		 * setting a request bit is enough.
		 */
		switch (fence->type) {
		case VMM_RISCV_FENCE_I:
			atomic_set_32(&hypctx->fence_req, FENCE_REQ_I);
			break;
		case VMM_RISCV_FENCE_VMA:
			if (fence->start == 0 && fence->size == 0)
				atomic_set_32(&hypctx->fence_req,
				    FENCE_REQ_VMA);
			else
				enq = true;
			break;
		case VMM_RISCV_FENCE_VMA_ASID:
			enq = true;
			break;
		default:
			KASSERT(0, ("%s: wrong fence type %d", __func__,
			    fence->type));
			break;
		}

		/*
		 * Try to enqueue.  If the queue is full, fall back to the
		 * more conservative global VMA request.
		 */
		if (enq)
			if (vmm_fence_enqueue(hypctx, fence) == false)
				atomic_set_32(&hypctx->fence_req,
				    FENCE_REQ_VMA);

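		/*
		 * Make the request visible before checking the vcpu state,
		 * so a running vcpu is either sent an IPI below or sees the
		 * request on its next guest entry.
		 */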
		mb();

		state = vcpu_get_state(vcpu, &hostcpu);
		if (state == VCPU_RUNNING)
			CPU_SET(hostcpu, &running_cpus);
	}

	/*
	 * Interrupt the other cores.  On receipt of the IPI they leave the
	 * guest, and on re-entry to the guest they process the fence request.
	 *
	 * If a vcpu migrates to another CPU right here, it will still
	 * process all pending fences on its next entry to the guest.
	 */
	if (!CPU_EMPTY(&running_cpus))
		smp_rendezvous_cpus(running_cpus, NULL, NULL, NULL, NULL);
}
209