xref: /freebsd/usr.sbin/bhyve/mem.c (revision 7d9ef309bd09c061e9cad8ace6f7bb4c60f087e6)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps with existing ranges. On lookup, the key is a
 * degenerate range whose base and limit are both the address of interest,
 * so the comparison function matches any registered range containing it.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <vmmapi.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

static RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	**mmio_hint;
static int mmio_ncpu;

static pthread_rwlock_t mmio_rwlock;

static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}
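
#if 0
/*
 * Illustrative sketch, not part of the build: shows how the degenerate
 * key built in mmio_rb_lookup() compares equal to any range containing
 * the address. The range and address values are hypothetical.
 */
static void
mmio_rb_lookup_example(void)
{
	struct mmio_rb_range reg, key;

	/* A registered range covering guest-physical [0x1000, 0x1fff]. */
	reg.mr_base = 0x1000;
	reg.mr_end = 0x1fff;

	/* Lookup key for address 0x1500: base and limit are both 0x1500. */
	key.mr_base = key.mr_end = 0x1500;

	/*
	 * key.mr_end (0x1500) is not below reg.mr_base (0x1000) and
	 * key.mr_base (0x1500) is not above reg.mr_end (0x1fff), so the
	 * comparator returns 0 and RB_FIND() reports a match.
	 */
	assert(mmio_rb_range_compare(&key, &reg) == 0);
}
#endif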

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		       "claims region already claimed for '%s'\n",
		       new->mr_base, new->mr_end,
		       overlap->mr_base, overlap->mr_end,
		       new->mr_param.name, overlap->mr_param.name);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		       np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
    void *arg);

static int
mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
	    mr->arg2);
	return (error);
}

static int
mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
	    mr->arg2);
	return (error);
}

static int
access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable, vcpuid;

	vcpuid = vcpu_id(vcpu);
	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpuid] &&
	    paddr >= mmio_hint[vcpuid]->mr_base &&
	    paddr <= mmio_hint[vcpuid]->mr_end) {
		entry = mmio_hint[vcpuid];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpuid] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed never to be removed,
	 * so there is no need to hold 'mmio_rwlock' while calling its
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However, by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}

struct emulate_mem_args {
	struct vie *vie;
	struct vm_guest_paging *paging;
};

static int
emulate_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct emulate_mem_args *ema;

	ema = arg;
	return (vmm_emulate_instruction(vcpu, paddr, ema->vie, ema->paging,
	    mem_read, mem_write, mr));
}

int
emulate_mem(struct vcpu *vcpu, uint64_t paddr, struct vie *vie,
    struct vm_guest_paging *paging)
{
	struct emulate_mem_args ema;

	ema.vie = vie;
	ema.paging = paging;
	return (access_memory(vcpu, paddr, emulate_mem_cb, &ema));
}

struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr, void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

int
read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}
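
#if 0
/*
 * Illustrative sketch, not part of the build: a 4-byte read of a
 * hypothetical guest-physical address through the registered ranges.
 * ESRCH means no range (regular or fallback) covers the address; any
 * other non-zero value comes from the range's handler.
 */
static void
read_mem_example(struct vcpu *vcpu)
{
	uint64_t val;
	int error;

	error = read_mem(vcpu, 0xd0000000UL, &val, 4);
	if (error == ESRCH)
		printf("no range registered at 0xd0000000\n");
	else if (error == 0)
		printf("read value %lx\n", val);
}
#endif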

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp", __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		else
			err = EEXIST;	/* a range at this base already exists */
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}
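
#if 0
/*
 * Illustrative sketch, not part of the build: registering a hypothetical
 * MMIO range. 'dummy_handler', the base address, and the size are invented
 * for the example; the handler signature mirrors the calls made from
 * mem_read() and mem_write() above. A range whose handler may re-enter
 * register_mem() should be registered with MEM_F_IMMUTABLE (see the
 * comment in access_memory()).
 */
static int
dummy_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr __unused,
    int size __unused, uint64_t *val, void *arg1 __unused, long arg2 __unused)
{
	if (dir == MEM_F_READ)
		*val = 0;	/* reads return zero */
	return (0);		/* writes are accepted and ignored */
}

static int
register_dummy_range(void)
{
	struct mem_range mr = {
		.name = "dummy",
		.flags = MEM_F_RW,
		.handler = dummy_handler,
		.base = 0xd0000000UL,
		.size = 0x1000,
	};

	return (register_mem(&mr));
}
#endif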

int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush the per-vCPU cache */
		for (i = 0; i < mmio_ncpu; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	free(entry);

	return (err);
}

void
init_mem(int ncpu)
{

	mmio_ncpu = ncpu;
	mmio_hint = calloc(ncpu, sizeof(*mmio_hint));
	if (mmio_hint == NULL)
		err(1, "%s: calloc", __func__);
	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
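
#if 0
/*
 * Illustrative sketch, not part of the build: init_mem() must run before
 * any range is registered or any vCPU can fault on MMIO, since it sizes
 * the per-vCPU hint array and initializes 'mmio_rwlock'. The vCPU count
 * below is hypothetical.
 */
static void
init_mem_example(void)
{
	init_mem(4);			/* hypothetical 4-vCPU guest */
	(void)register_dummy_range();	/* from the sketch above */
}
#endif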