xref: /freebsd/usr.sbin/bhyve/mem.c (revision a223d3ed90bfe313ce5987d468a25a915d7d1254)
/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlap with existing ranges. On lookup, the key is a
 * degenerate range whose base and limit are both the address being looked
 * up, so the comparison function matches the range that contains it.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/tree.h>
#include <sys/errno.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];

static pthread_rwlock_t mmio_rwlock;

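/*
 * RB tree comparison routine: ranges are ordered by address, and any two
 * ranges that overlap compare equal. This is what lets RB_INSERT() report
 * a collision for overlapping registrations, and lets a degenerate key
 * (base == end == addr) find the range containing 'addr'.
 */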
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

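/*
 * Look up the range containing 'addr' in the given tree. Returns 0 and
 * fills in '*entry' on success, or ENOENT if no registered range covers
 * the address. Callers must hold mmio_rwlock.
 */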
static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

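/*
 * Insert a new range into the tree. RB_INSERT() returns the node it
 * collided with, if any; since overlapping ranges compare equal, a
 * non-NULL return means the new range overlaps an existing registration
 * and EEXIST is returned.
 */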
static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	pthread_rwlock_unlock(&mmio_rwlock);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

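/*
 * Thin adapters that present a registered mem_range handler through the
 * read/write callback interface expected by vmm_emulate_instruction().
 * 'arg' is the struct mem_range backing the faulting address.
 */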
static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
			       rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
			       &wval, mr->arg1, mr->arg2);
	return (error);
}

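/*
 * Emulate an instruction that faulted on the MMIO address 'paddr': locate
 * the backing range (per-vCPU hint first, then the registered ranges, then
 * the fallback ranges) and let vmm_emulate_instruction() dispatch the
 * access to that range's handler. Returns ESRCH if no range covers the
 * address.
 */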
int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie)
{
	struct mmio_rb_range *entry;
	int err;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			pthread_rwlock_unlock(&mmio_rwlock);
			return (ESRCH);
		}
	}

	assert(entry != NULL);
	err = vmm_emulate_instruction(ctx, vcpu, paddr, vie,
				      mem_read, mem_write, &entry->mr_param);
	pthread_rwlock_unlock(&mmio_rwlock);

	return (err);
}

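/*
 * Common helper for register_mem() and register_mem_fallback(): copy the
 * caller's mem_range into a new tree node and insert it, refusing the
 * registration with EEXIST if an existing range already covers the base
 * address or overlaps the new range.
 */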
static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int		err;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));

	if (mrp != NULL) {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		else
			err = EEXIST;
		pthread_rwlock_unlock(&mmio_rwlock);
		if (err)
			free(mrp);
	} else
		err = ENOMEM;

	return (err);
}

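/*
 * Example (illustrative sketch only, not part of this file): a device
 * model registers its MMIO window with a filled-in struct mem_range.
 * 'mydev_mem_handler' and 'sc' are hypothetical names; the handler must
 * match the callback type declared in mem.h and is invoked with
 * MEM_F_READ or MEM_F_WRITE as its direction argument.
 *
 *	struct mem_range mr;
 *	int error;
 *
 *	memset(&mr, 0, sizeof(mr));
 *	mr.name = "mydev";
 *	mr.base = 0xd0000000UL;
 *	mr.size = 0x1000;
 *	mr.handler = mydev_mem_handler;
 *	mr.arg1 = sc;
 *	error = register_mem(&mr);
 *	assert(error == 0);
 */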
int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}

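/*
 * Remove a range previously registered with register_mem(). The range is
 * looked up by its base address in the main tree and must match the
 * caller's description; any per-vCPU hints that still point at it are
 * invalidated before the node is freed.
 */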
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush per-vCPU cache */
		for (i = 0; i < VM_MAXCPU; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	pthread_rwlock_unlock(&mmio_rwlock);

	if (entry)
		free(entry);

	return (err);
}

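/*
 * One-time initialization of the MMIO range trees and their lock. Must be
 * called before any ranges are registered or emulate_mem() is invoked.
 */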
void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}