xref: /freebsd/usr.sbin/bhyve/mem.c (revision 0b3105a37d7adcadcb720112fed4dc4e8040be99)
/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps with existing ranges. On lookup, the key is a
 * degenerate range whose base and limit are both the address being
 * searched, so the comparison function matches it against the range
 * that contains that address.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/tree.h>
#include <sys/errno.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];

static pthread_rwlock_t mmio_rwlock;

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}
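
/*
 * Illustrative sketch (not compiled; all values are made up): because
 * any two overlapping ranges compare equal, RB_INSERT() reports a
 * collision for overlapping insertions, and RB_FIND() matches a
 * single-address key against the range containing it.
 */
#if 0
	struct mmio_rb_range a = { .mr_base = 0x1000, .mr_end = 0x1fff };
	struct mmio_rb_range b = { .mr_base = 0x1800, .mr_end = 0x27ff };
	struct mmio_rb_range key = { .mr_base = 0x1234, .mr_end = 0x1234 };

	mmio_rb_range_compare(&a, &b);	 /* 0: 'a' and 'b' overlap */
	mmio_rb_range_compare(&key, &a); /* 0: 0x1234 falls inside 'a' */
	mmio_rb_range_compare(&key, &b); /* -1: 0x1234 is below 'b' */
#endif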

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	pthread_rwlock_unlock(&mmio_rwlock);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
			       rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
			       &wval, mr->arg1, mr->arg2);
	return (error);
}

int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct mmio_rb_range *entry;
	int err, immutable;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] != NULL &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end)
		entry = mmio_hint[vcpu];
	else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			pthread_rwlock_unlock(&mmio_rwlock);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed never to be removed,
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However, by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable)
		pthread_rwlock_unlock(&mmio_rwlock);

	err = vmm_emulate_instruction(ctx, vcpu, paddr, vie, paging,
				      mem_read, mem_write, &entry->mr_param);

	if (!immutable)
		pthread_rwlock_unlock(&mmio_rwlock);

	return (err);
}
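
/*
 * Illustrative sketch (not compiled): registering a range with
 * MEM_F_IMMUTABLE, as the comment above suggests for the PCI extended
 * config space window.  The name, handler, base and size are made-up
 * placeholders, not bhyve's actual values.
 */
#if 0
	struct mem_range ecfg_mr = {
		.name	 = "ecfg_window",	/* hypothetical */
		.flags	 = MEM_F_READ | MEM_F_WRITE | MEM_F_IMMUTABLE,
		.handler = ecfg_handler,	/* hypothetical */
		.base	 = 0xe0000000,
		.size	 = 256 * 1024 * 1024,
	};

	/*
	 * Once registered, emulate_mem() drops 'mmio_rwlock' before
	 * calling ecfg_handler(), so the handler may safely call
	 * register_mem() without self-deadlocking.
	 */
	register_mem(&ecfg_mr);
#endif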

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));

	if (mrp != NULL) {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		else
			err = EEXIST;	/* base already inside a range */
		pthread_rwlock_unlock(&mmio_rwlock);
		if (err)
			free(mrp);
	} else
		err = ENOMEM;

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}
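
/*
 * Illustrative sketch (not compiled): a minimal device model
 * registering a 4KB MMIO window.  The handler signature is inferred
 * from the mem_read()/mem_write() thunks above (see mem.h for the
 * authoritative typedef); the name, base address and handler are
 * made-up placeholders.
 */
#if 0
static int
null_dev_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
    int size, uint64_t *val, void *arg1, long arg2)
{
	if (dir == MEM_F_READ)
		*val = 0;	/* reads return zero */
	return (0);		/* writes are silently discarded */
}

static void
null_dev_attach(void)
{
	struct mem_range mr = {
		.name	 = "null_dev",
		.flags	 = MEM_F_READ | MEM_F_WRITE,
		.handler = null_dev_handler,
		.base	 = 0xd0000000,	/* arbitrary guest-physical base */
		.size	 = 0x1000,
	};

	/* Fails with EEXIST if the window overlaps an existing range. */
	assert(register_mem(&mr) == 0);
}
#endif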

int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(strcmp(mr->name, memp->name) == 0);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* Flush the per-vCPU cache */
		for (i = 0; i < VM_MAXCPU; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	pthread_rwlock_unlock(&mmio_rwlock);

	free(entry);

	return (err);
}

void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
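
/*
 * Illustrative sketch (not compiled): expected call order.  init_mem()
 * must run before any registration so the trees and the rwlock are
 * valid; 'some_mr' is a made-up mem_range, not bhyve's.
 */
#if 0
	init_mem();

	register_mem(&some_mr);		/* device models add windows */

	/* vCPU threads fault on MMIO and end up in emulate_mem()... */

	unregister_mem(&some_mr);	/* teardown of non-immutable ranges */
#endif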