xref: /freebsd/usr.sbin/bhyve/mem.c (revision 2008043f386721d58158e37e0d7e50df8095942d)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key is a degenerate range with
 * base and limit both equal to the address, so the search finds the range
 * that contains it.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <vmmapi.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	**mmio_hint;
static int mmio_ncpu;

static pthread_rwlock_t mmio_rwlock;

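/*
 * Ranges compare equal when they overlap, so a zero-length key built from
 * a single address compares equal to the range that contains that address.
 */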
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

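/*
 * Look up the range containing 'addr' in the given tree. Returns 0 and sets
 * '*entry' on success, or ENOENT if no registered range covers the address.
 */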
static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

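/*
 * Insert a new range into the tree. Returns EEXIST if it overlaps a range
 * that is already registered.
 */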
static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		       "claims region already claimed for '%s'\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end,
		       new->mr_param.name, overlap->mr_param.name);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

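/*
 * Callback invoked by access_memory() once the range covering 'gpa' has
 * been resolved.
 */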
typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
    void *arg);

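/*
 * Read/write adapters handed to vmm_emulate_instruction(); each forwards
 * the access to the handler of the memory range being emulated.
 */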
static int
mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
	    mr->arg2);
	return (error);
}

static int
mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
	    mr->arg2);
	return (error);
}

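/*
 * Resolve 'paddr' to a registered range, consulting the per-vCPU hint first,
 * then the main tree and finally the fallback tree, and invoke 'cb' on it.
 * Returns ESRCH if no range covers the address.
 */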
static int
access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable, vcpuid;

	vcpuid = vcpu_id(vcpu);
	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpuid] &&
	    paddr >= mmio_hint[vcpuid]->mr_base &&
	    paddr <= mmio_hint[vcpuid]->mr_end) {
		entry = mmio_hint[vcpuid];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpuid] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed never to be removed,
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}

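/*
 * Glue for emulate_mem(): bundles the decoded instruction and paging state
 * so they can be passed through access_memory() to
 * vmm_emulate_instruction().
 */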
struct emulate_mem_args {
	struct vie *vie;
	struct vm_guest_paging *paging;
};

static int
emulate_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct emulate_mem_args *ema;

	ema = arg;
	return (vmm_emulate_instruction(vcpu, paddr, ema->vie, ema->paging,
	    mem_read, mem_write, mr));
}

int
emulate_mem(struct vcpu *vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct emulate_mem_args ema;

	ema.vie = vie;
	ema.paging = paging;
	return (access_memory(vcpu, paddr, emulate_mem_cb, &ema));
}

struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr, void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

int
read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}

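/*
 * Common path for register_mem() and register_mem_fallback(): allocate a
 * tree node for the range and insert it, failing if it overlaps a range
 * that is already registered.
 */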
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp", __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		else
			err = EEXIST;
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

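/*
 * Typical use by a device model (an illustrative sketch only: the handler
 * 'mydev_handler', the softc pointer 'sc' and the base/size values are
 * hypothetical, not taken from this file):
 *
 *	struct mem_range mr = {
 *		.name = "mydev",
 *		.flags = MEM_F_RW,
 *		.handler = mydev_handler,
 *		.arg1 = sc,
 *		.arg2 = 0,
 *		.base = 0xc0000000,
 *		.size = 0x1000,
 *	};
 *	error = register_mem(&mr);
 */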
int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}

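/*
 * Remove a previously registered, non-immutable range and invalidate any
 * per-vCPU hints that still point at it.
 */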
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* Flush the per-vCPU cache */
		for (i = 0; i < mmio_ncpu; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry)
		free(entry);

	return (err);
}

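/*
 * Called once at startup with the number of vCPUs: initializes the lookup
 * trees, the per-vCPU hint array and the lock that protects them.
 */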
void
init_mem(int ncpu)
{

	mmio_ncpu = ncpu;
	mmio_hint = calloc(ncpu, sizeof(*mmio_hint));
	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}