xref: /illumos-gate/usr/src/cmd/bhyve/common/mem.c (revision 5c4a5fe16715fb423db76577a6883b5bbecdbe45)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2020 Oxide Computer Company
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key is a degenerate range whose
 * base and limit are both the target address, so the comparator matches
 * whichever registered range contains that address.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <vmmapi.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	**mmio_hint;
static int mmio_ncpu;

static pthread_rwlock_t mmio_rwlock;

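/*
 * RB tree comparator: overlapping ranges compare as equal, so a lookup
 * key with mr_base == mr_end == addr matches the range containing addr.
 */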
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

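/*
 * Find the range in 'rbt' which contains 'addr'. Returns 0 and stores the
 * match in '*entry', or ENOENT if no registered range covers the address.
 */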
static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

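/*
 * Insert 'new' into 'rbt'. Fails with EEXIST if the range overlaps one
 * that is already registered in the tree.
 */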
static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		    "claims region already claimed for '%s'\n",
		    new->mr_base, new->mr_end,
		    overlap->mr_base, overlap->mr_end,
		    new->mr_param.name, overlap->mr_param.name);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		    np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
    void *arg);

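/*
 * Dispatch a read of 'size' bytes at 'gpa' to the handler registered for
 * the range ('arg' carries the struct mem_range).
 */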
static int
mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
	    mr->arg2);
	return (error);
}

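/*
 * Dispatch a write of 'size' bytes at 'gpa' to the range's handler.
 */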
static int
mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
	    mr->arg2);
	return (error);
}

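/*
 * Resolve 'paddr' to a registered range (per-vCPU hint first, then the
 * primary tree, then the fallback tree) and invoke 'cb' on it. Returns
 * ESRCH if no range covers the address. For immutable ranges the rwlock
 * is dropped before the handler runs; see the comment in the body.
 */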
static int
access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable, vcpuid;

	vcpuid = vcpu_id(vcpu);
	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpuid] &&
	    paddr >= mmio_hint[vcpuid]->mr_base &&
	    paddr <= mmio_hint[vcpuid]->mr_end) {
		entry = mmio_hint[vcpuid];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpuid] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed never to be removed,
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However, by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}

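/*
 * Callback for emulate_mem(): perform the MMIO read or write described
 * by 'arg' (a struct vm_mmio) against the resolved range.
 */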
static int
emulate_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct vm_mmio *mmio;
	int err = 0;

	mmio = arg;

	if (mmio->read != 0) {
		err = mem_read(vcpu, paddr, &mmio->data, mmio->bytes, mr);
	} else {
		err = mem_write(vcpu, paddr, mmio->data, mmio->bytes, mr);
	}

	return (err);
}

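/*
 * Entry point for emulating a decoded MMIO access described by 'mmio'.
 */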
int
emulate_mem(struct vcpu *vcpu, struct vm_mmio *mmio)
{
	return (access_memory(vcpu, mmio->gpa, emulate_mem_cb, mmio));
}

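/*
 * Argument block and callback used by read_mem()/write_mem() to funnel a
 * single access through access_memory().
 */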
struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

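/*
 * Read (read_mem) or write (write_mem) a value of 'size' bytes at guest
 * physical address 'gpa' via the handler registered for that address.
 */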
int
read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}

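/*
 * Common helper for register_mem() and register_mem_fallback(): wrap
 * 'memp' in an mmio_rb_range and insert it into the given tree under the
 * write lock.
 */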
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp\n",
		    __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
#ifndef	__FreeBSD__
		else /* smatch warn: possible memory leak of 'mrp' */
			free(mrp);
#endif
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

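/*
 * register_mem() attaches a range to the primary tree; ranges added with
 * register_mem_fallback() are consulted only when no primary range covers
 * the address.
 *
 * Illustrative sketch (hypothetical device name and addresses, not taken
 * from this file): a device model fills in a struct mem_range once and
 * registers it, after which its handler is invoked for every guest access
 * inside that window:
 *
 *	struct mem_range mr = {
 *		.name = "mydev",
 *		.flags = MEM_F_READ | MEM_F_WRITE,
 *		.handler = mydev_handler,
 *		.base = 0xc0000000,
 *		.size = 0x1000,
 *	};
 *	(void) register_mem(&mr);
 */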
int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}

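/*
 * Remove the (non-immutable) range registered at memp->base from the
 * primary tree and invalidate any per-vCPU hints that point at it.
 */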
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush per-vCPU cache */
		for (i = 0; i < mmio_ncpu; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry)
		free(entry);

	return (err);
}

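/*
 * One-time setup: size the per-vCPU hint array for 'ncpu' vCPUs and
 * initialize both trees and the rwlock.
 */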
void
init_mem(int ncpu)
{

	mmio_ncpu = ncpu;
	mmio_hint = calloc(ncpu, sizeof(*mmio_hint));
	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}