/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit
 * so that it compares equal to any registered range containing that address.
 *
 * It is assumed that all setup of ranges takes place in single-threaded
 * mode before vCPUs have been started. As such, no locks are used on the
 * RB tree. If this is no longer the case, then a r/w lock could be used,
 * with readers taking it around the lookup and a writer taking it whenever
 * the tree needs to be changed (and the per-vCPU caches flushed); see the
 * disabled sketch following mmio_rb_dump() below.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/tree.h>
#include <sys/errno.h>
#include <machine/vmm.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];

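/*
 * Compare ranges by their position on the guest-physical address line:
 * a range is "less" only if it lies entirely below the other, "greater"
 * only if it lies entirely above, and "equal" whenever the two overlap.
 * A lookup key with base == end (a single address) therefore compares
 * equal to the range containing it, and RB_INSERT() returns any
 * overlapping element, which is how mmio_rb_add() detects conflicts.
 */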
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	struct mmio_rb_range *np;

	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
}
#endif

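/*
 * A minimal sketch of the r/w-locked variant mentioned at the top of
 * this file, left disabled in the same style as mmio_rb_dump() above.
 * The lock name (mmio_rwlock) is invented here and <pthread.h> would
 * need to be included; this is illustrative only, since the current
 * code relies on single-threaded setup before the vCPUs are started.
 */
#if 0
static pthread_rwlock_t mmio_rwlock = PTHREAD_RWLOCK_INITIALIZER;

static int
mmio_rb_lookup_locked(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	int err;

	/* Multiple vCPUs may look up ranges concurrently. */
	pthread_rwlock_rdlock(&mmio_rwlock);
	err = mmio_rb_lookup(rbt, addr, entry);
	pthread_rwlock_unlock(&mmio_rwlock);
	return (err);
}

static int
mmio_rb_add_locked(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	int err, i;

	/* Tree changes are exclusive and must flush the per-vCPU caches. */
	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_add(rbt, new);
	for (i = 0; i < VM_MAXCPU; i++)
		mmio_hint[i] = NULL;
	pthread_rwlock_unlock(&mmio_rwlock);
	return (err);
}
#endif
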
RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

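/*
 * Trampolines handed to vmm_emulate_instruction(): 'arg' carries the
 * struct mem_range for the region, and the access is forwarded to its
 * handler with the direction flag (MEM_F_READ or MEM_F_WRITE) filled in.
 */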
static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
			       rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
			       &wval, mr->arg1, mr->arg2);
	return (error);
}

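/*
 * Resolve 'paddr' to a registered range and emulate the faulting
 * instruction against it: the per-vCPU hint is consulted first, then
 * the main tree (updating the hint on a hit), and finally the fallback
 * tree. ESRCH is returned if no range covers the address.
 */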
int
emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie)
{
	struct mmio_rb_range *entry;
	int err;

	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (!mmio_rb_lookup(&mmio_rb_root, paddr, &entry)) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			return (ESRCH);
		}
	}

	assert(entry != NULL);
	err = vmm_emulate_instruction(ctx, vcpu, paddr, vie,
				      mem_read, mem_write, &entry->mr_param);
	return (err);
}

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *mrp;
	int		err;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));

	if (mrp != NULL) {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;

		err = mmio_rb_add(rbt, mrp);
		if (err)
			free(mrp);
	} else
		err = ENOMEM;

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}

void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
}
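
/*
 * An illustrative, disabled example of the registration API. The
 * device name, addresses, and handler below are invented for the
 * sketch, and the handler prototype is inferred from how mr->handler
 * is invoked in mem_read()/mem_write() above; the authoritative
 * definition is in mem.h. Real callers live in the device models.
 */
#if 0
static int
example_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
    int size, uint64_t *val, void *arg1, long arg2)
{
	/* Reads return zero, writes are silently dropped. */
	if (dir == MEM_F_READ)
		*val = 0;
	return (0);
}

static void
example_register(void)
{
	struct mem_range mr = {
		.name = "example",
		.base = 0xd0000000UL,	/* arbitrary guest-physical base */
		.size = 0x1000,		/* one 4KB page */
		.handler = example_mem_handler,
	};

	init_mem();

	/* Fails with EEXIST on overlap, ENOMEM on allocation failure. */
	if (register_mem(&mr) != 0)
		fprintf(stderr, "failed to register example range\n");
}
#endif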
235