/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit,
 * so the comparison function reports a match for any registered range
 * that contains the address.
 */
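/*
 * Illustration (values are hypothetical, not from this file): a range
 * registered with base 0xc0000000 and size 0x1000 is stored as the
 * inclusive interval [0xc0000000, 0xc0000fff]. A lookup for address
 * 0xc0000004 uses a key with base == end == 0xc0000004, which the
 * comparator below treats as "equal" to that node.
 */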

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <vmmapi.h>

#include "mem.h"

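/*
 * A registered range as kept in the tree; mr_end is inclusive,
 * i.e. mr_base + size - 1.
 */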
struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	**mmio_hint;
static int mmio_ncpu;

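/*
 * Serializes access to both trees and the hint cache: vCPU lookups take
 * the lock shared, while register/unregister take it exclusive.
 */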
static pthread_rwlock_t mmio_rwlock;

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		       "claims region already claimed for '%s'\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end,
		       new->mr_param.name, overlap->mr_param.name);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

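/*
 * Signature of the callback that access_memory() invokes once it has
 * resolved the mem_range covering the guest physical address.
 */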
typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
    void *arg);

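/*
 * Adapters matching the read/write callback types expected by
 * vmm_emulate_instruction(); 'arg' carries the resolved mem_range.
 */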
static int
mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
	    mr->arg2);
	return (error);
}

static int
mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
	    mr->arg2);
	return (error);
}

static int
access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable, vcpuid;

	vcpuid = vcpu_id(vcpu);
	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpuid] &&
	    paddr >= mmio_hint[vcpuid]->mr_base &&
	    paddr <= mmio_hint[vcpuid]->mr_end) {
		entry = mmio_hint[vcpuid];
	} else
		entry = NULL;

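	/*
	 * On a hint miss, search the primary tree first and then the
	 * fallback tree; ESRCH tells the caller that no handler claims
	 * this address.
	 */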
	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpuid] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed never to be removed,
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register, then register_mem() can
	 * deadlock on 'mmio_rwlock'. However, by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}

struct emulate_mem_args {
	struct vie *vie;
	struct vm_guest_paging *paging;
};

static int
emulate_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct emulate_mem_args *ema;

	ema = arg;
	return (vmm_emulate_instruction(vcpu, paddr, ema->vie, ema->paging,
	    mem_read, mem_write, mr));
}

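/*
 * Emulate a decoded instruction ('vie') that touched the guest physical
 * address 'paddr'; called from the instruction-emulation VM-exit path.
 */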
int
emulate_mem(struct vcpu *vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct emulate_mem_args ema;

	ema.vie = vie;
	ema.paging = paging;
	return (access_memory(vcpu, paddr, emulate_mem_cb, &ema));
}

struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr, void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

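/*
 * Perform a single read or write against a registered range without
 * going through instruction emulation.
 */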
int
read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}

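/*
 * Insert a copy of 'memp' into 'rbt'. Registration fails with EEXIST
 * if the new range overlaps one that is already present.
 */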
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp", __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		else
			err = EEXIST;	/* base already covered by a range */
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}
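
/*
 * Example usage (illustrative only; names, addresses, and the handler
 * are hypothetical, not part of this file): a device model fills in a
 * mem_range and registers it --
 *
 *	struct mem_range mr = {
 *		.name = "mydev",
 *		.flags = MEM_F_RW,
 *		.handler = mydev_handler,
 *		.arg1 = sc,
 *		.base = 0xd0000000,
 *		.size = 0x1000,
 *	};
 *	error = register_mem(&mr);
 *	assert(error == 0);
 */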

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}

int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
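		/*
		 * Sanity checks; 'name' is compared by pointer, so the
		 * caller must pass the same name string it registered with.
		 */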
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush per-vCPU cache */
		for (i = 0; i < mmio_ncpu; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry)
		free(entry);

	return (err);
}
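/*
 * One-time initialization: sizes the per-vCPU hint array and sets up
 * the trees and lock, so it must run before any ranges are registered.
 */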
void
init_mem(int ncpu)
{

	mmio_ncpu = ncpu;
	mmio_hint = calloc(ncpu, sizeof(*mmio_hint));
	if (mmio_hint == NULL)
		err(1, "%s: couldn't allocate mmio_hint array", __func__);
	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}