xref: /freebsd/sys/vm/memguard.c (revision a2a200a24d6137b42a72f41c8815e6c58f098d02)
1c0587701SJoel Dahl /*-
2e3813573SMatthew D Fleming  * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
3e3813573SMatthew D Fleming  * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
4e3813573SMatthew D Fleming  * All rights reserved.
5e4eb384bSBosko Milekic  *
6e4eb384bSBosko Milekic  * Redistribution and use in source and binary forms, with or without
7e4eb384bSBosko Milekic  * modification, are permitted provided that the following conditions
8e4eb384bSBosko Milekic  * are met:
9e4eb384bSBosko Milekic  * 1. Redistributions of source code must retain the above copyright
10e4eb384bSBosko Milekic  *    notice unmodified, this list of conditions, and the following
11e4eb384bSBosko Milekic  *    disclaimer.
12e4eb384bSBosko Milekic  * 2. Redistributions in binary form must reproduce the above copyright
13e4eb384bSBosko Milekic  *    notice, this list of conditions and the following disclaimer in the
14e4eb384bSBosko Milekic  *    documentation and/or other materials provided with the distribution.
15e4eb384bSBosko Milekic  *
16e4eb384bSBosko Milekic  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17e4eb384bSBosko Milekic  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18e4eb384bSBosko Milekic  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19e4eb384bSBosko Milekic  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20e4eb384bSBosko Milekic  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21e4eb384bSBosko Milekic  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22e4eb384bSBosko Milekic  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23e4eb384bSBosko Milekic  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24e4eb384bSBosko Milekic  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25e4eb384bSBosko Milekic  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26e4eb384bSBosko Milekic  */
27e4eb384bSBosko Milekic 
28e4eb384bSBosko Milekic #include <sys/cdefs.h>
29e4eb384bSBosko Milekic __FBSDID("$FreeBSD$");
30e4eb384bSBosko Milekic 
31e4eb384bSBosko Milekic /*
32e4eb384bSBosko Milekic  * MemGuard is a simple replacement allocator, for debugging only,
33e4eb384bSBosko Milekic  * which provides ElectricFence-style memory barrier protection on
34e4eb384bSBosko Milekic  * objects being allocated, and is used to detect tampering-after-free
35e4eb384bSBosko Milekic  * scenarios.
36e4eb384bSBosko Milekic  *
37e4eb384bSBosko Milekic  * See the memguard(9) man page for more information on using MemGuard.
38e4eb384bSBosko Milekic  */
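
/*
 * A minimal usage sketch (illustrative values only; the knobs are the
 * vm.memguard.* tunables and sysctls defined below, but "EXAMPLE TYPE"
 * is a hypothetical malloc(9) type description):
 *
 *	# /boot/loader.conf
 *	vm.memguard.divisor="10"	# reserve 1/10 of kmem for MemGuard
 *	vm.memguard.desc="EXAMPLE TYPE"	# malloc(9) type to monitor
 */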
39e4eb384bSBosko Milekic 
40f02d86e2SMatthew D Fleming #include "opt_vm.h"
41f02d86e2SMatthew D Fleming 
42e4eb384bSBosko Milekic #include <sys/param.h>
43e4eb384bSBosko Milekic #include <sys/systm.h>
44e4eb384bSBosko Milekic #include <sys/kernel.h>
45e4eb384bSBosko Milekic #include <sys/types.h>
46e4eb384bSBosko Milekic #include <sys/queue.h>
47e4eb384bSBosko Milekic #include <sys/lock.h>
48e4eb384bSBosko Milekic #include <sys/mutex.h>
49e4eb384bSBosko Milekic #include <sys/malloc.h>
50d362c40dSPawel Jakub Dawidek #include <sys/sysctl.h>
51e4eb384bSBosko Milekic 
52e4eb384bSBosko Milekic #include <vm/vm.h>
53e3813573SMatthew D Fleming #include <vm/uma.h>
5403412565SBosko Milekic #include <vm/vm_param.h>
55e4eb384bSBosko Milekic #include <vm/vm_page.h>
56e4eb384bSBosko Milekic #include <vm/vm_map.h>
57e3813573SMatthew D Fleming #include <vm/vm_object.h>
58e4eb384bSBosko Milekic #include <vm/vm_extern.h>
59e4eb384bSBosko Milekic #include <vm/memguard.h>
60e4eb384bSBosko Milekic 
61d362c40dSPawel Jakub Dawidek SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
62d362c40dSPawel Jakub Dawidek /*
63d362c40dSPawel Jakub Dawidek  * The vm_memguard_divisor variable controls how much of kmem_map should be
64d362c40dSPawel Jakub Dawidek  * reserved for MemGuard.
65d362c40dSPawel Jakub Dawidek  */
66e3813573SMatthew D Fleming static u_int vm_memguard_divisor;
67e3813573SMatthew D Fleming SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
68e3813573SMatthew D Fleming     &vm_memguard_divisor,
69d362c40dSPawel Jakub Dawidek     0, "(kmem_size/memguard_divisor) == memguard submap size");
70d362c40dSPawel Jakub Dawidek 
71d362c40dSPawel Jakub Dawidek /*
72d362c40dSPawel Jakub Dawidek  * Short description (ks_shortdesc) of memory type to monitor.
73d362c40dSPawel Jakub Dawidek  */
74d362c40dSPawel Jakub Dawidek static char vm_memguard_desc[128] = "";
75d362c40dSPawel Jakub Dawidek static struct malloc_type *vm_memguard_mtype = NULL;
76d362c40dSPawel Jakub Dawidek TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
77d362c40dSPawel Jakub Dawidek static int
78d362c40dSPawel Jakub Dawidek memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
79d362c40dSPawel Jakub Dawidek {
80e3813573SMatthew D Fleming 	char desc[sizeof(vm_memguard_desc)];
81e3813573SMatthew D Fleming 	int error;
82d362c40dSPawel Jakub Dawidek 
83d362c40dSPawel Jakub Dawidek 	strlcpy(desc, vm_memguard_desc, sizeof(desc));
84d362c40dSPawel Jakub Dawidek 	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
85d362c40dSPawel Jakub Dawidek 	if (error != 0 || req->newptr == NULL)
86d362c40dSPawel Jakub Dawidek 		return (error);
87d362c40dSPawel Jakub Dawidek 
88d362c40dSPawel Jakub Dawidek 	mtx_lock(&malloc_mtx);
89d362c40dSPawel Jakub Dawidek 	/*
90d362c40dSPawel Jakub Dawidek 	 * If mtp is NULL, it will be initialized in memguard_cmp().
91d362c40dSPawel Jakub Dawidek 	 */
92e3813573SMatthew D Fleming 	vm_memguard_mtype = malloc_desc2type(desc);
93d362c40dSPawel Jakub Dawidek 	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
94d362c40dSPawel Jakub Dawidek 	mtx_unlock(&malloc_mtx);
95d362c40dSPawel Jakub Dawidek 	return (error);
96d362c40dSPawel Jakub Dawidek }
97e3813573SMatthew D Fleming SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
98e3813573SMatthew D Fleming     CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
99d362c40dSPawel Jakub Dawidek     memguard_sysctl_desc, "A", "Short description of memory type to monitor");
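
/*
 * Runtime example (hypothetical type description): writing this sysctl
 * goes through memguard_sysctl_desc() above, which records the new
 * string and looks up the matching malloc type under malloc_mtx.
 *
 *	sysctl vm.memguard.desc="EXAMPLE TYPE"	# switch the monitored type
 *	sysctl vm.memguard.desc=""		# effectively match no type
 */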
100d362c40dSPawel Jakub Dawidek 
101e3813573SMatthew D Fleming static vm_map_t memguard_map = NULL;
102e3813573SMatthew D Fleming static vm_offset_t memguard_cursor;
103e3813573SMatthew D Fleming static vm_size_t memguard_mapsize;
104e3813573SMatthew D Fleming static vm_size_t memguard_physlimit;
105e3813573SMatthew D Fleming static u_long memguard_wasted;
106e3813573SMatthew D Fleming static u_long memguard_wrap;
107e3813573SMatthew D Fleming static u_long memguard_succ;
108e3813573SMatthew D Fleming static u_long memguard_fail_kva;
109e3813573SMatthew D Fleming static u_long memguard_fail_pgs;
110e3813573SMatthew D Fleming 
111e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
112e3813573SMatthew D Fleming     &memguard_cursor, 0, "MemGuard cursor");
113e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
114e3813573SMatthew D Fleming     &memguard_mapsize, 0, "MemGuard private vm_map size");
115e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
116e3813573SMatthew D Fleming     &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
117e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
118e3813573SMatthew D Fleming     &memguard_wasted, 0, "Excess memory used through page promotion");
119e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
120e3813573SMatthew D Fleming     &memguard_wrap, 0, "MemGuard cursor wrap count");
121e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
122e3813573SMatthew D Fleming     &memguard_succ, 0, "Count of successful MemGuard allocations");
123e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
124e3813573SMatthew D Fleming     &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
125e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
126e3813573SMatthew D Fleming     &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");
127e3813573SMatthew D Fleming 
128e3813573SMatthew D Fleming #define MG_GUARD	0x001
129e3813573SMatthew D Fleming #define MG_ALLLARGE	0x002
130e3813573SMatthew D Fleming static int memguard_options = MG_GUARD;
131e3813573SMatthew D Fleming TUNABLE_INT("vm.memguard.options", &memguard_options);
132e3813573SMatthew D Fleming SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
133e3813573SMatthew D Fleming     &memguard_options, 0,
134e3813573SMatthew D Fleming     "MemGuard options:\n"
135e3813573SMatthew D Fleming     "\t0x001 - add guard pages around each allocation\n"
136e3813573SMatthew D Fleming     "\t0x002 - always use MemGuard for allocations over a page");
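
/*
 * Example (illustrative values): the options value is a bit mask, so the
 * two flags above can be combined.  MG_GUARD is already the default.
 *
 *	sysctl vm.memguard.options=3	# MG_GUARD | MG_ALLLARGE
 *	sysctl vm.memguard.options=2	# no guard pages, but route all
 *					# page-sized and larger allocations
 *					# through MemGuard
 */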
137e3813573SMatthew D Fleming 
138e3813573SMatthew D Fleming static u_int memguard_minsize;
139e3813573SMatthew D Fleming static u_long memguard_minsize_reject;
140e3813573SMatthew D Fleming SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
141e3813573SMatthew D Fleming     &memguard_minsize, 0, "Minimum size for page promotion");
142e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
143e3813573SMatthew D Fleming     &memguard_minsize_reject, 0, "# times rejected for size");
144e3813573SMatthew D Fleming 
145e3813573SMatthew D Fleming static u_int memguard_frequency;
146e3813573SMatthew D Fleming static u_long memguard_frequency_hits;
147e3813573SMatthew D Fleming TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
148e3813573SMatthew D Fleming SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
149e3813573SMatthew D Fleming     &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
150e3813573SMatthew D Fleming SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
151e3813573SMatthew D Fleming     &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
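
/*
 * Example (illustrative values): "frequency" is interpreted as a chance
 * out of 100000 in memguard_cmp() below, so a value of 1000 guards
 * roughly 1% of the allocations that pass the minsize check, regardless
 * of the type selected via vm.memguard.desc.
 *
 *	sysctl vm.memguard.minsize=64		# ignore tiny requests
 *	sysctl vm.memguard.frequency=1000	# ~1% of eligible requests
 */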
152e3813573SMatthew D Fleming 
153e4eb384bSBosko Milekic 
154e4eb384bSBosko Milekic /*
155e3813573SMatthew D Fleming  * Return a fudged value to be used for vm_kmem_size for allocating
156e3813573SMatthew D Fleming  * the kmem_map.  The memguard memory will be a submap.
157e4eb384bSBosko Milekic  */
158e3813573SMatthew D Fleming unsigned long
159e3813573SMatthew D Fleming memguard_fudge(unsigned long km_size, unsigned long km_max)
160e3813573SMatthew D Fleming {
161e3813573SMatthew D Fleming 	u_long mem_pgs = cnt.v_page_count;
162e4eb384bSBosko Milekic 
163e3813573SMatthew D Fleming 	vm_memguard_divisor = 10;
164e3813573SMatthew D Fleming 	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);
165e3813573SMatthew D Fleming 
166e3813573SMatthew D Fleming 	/* Pick a conservative value if the provided value is unreasonable. */
167e3813573SMatthew D Fleming 	if ((vm_memguard_divisor == 0) ||
168e3813573SMatthew D Fleming 	    ((km_size / vm_memguard_divisor) == 0))
169e3813573SMatthew D Fleming 		vm_memguard_divisor = 10;
170e4eb384bSBosko Milekic 	/*
171e3813573SMatthew D Fleming 	 * Limit consumption of physical pages to
172e3813573SMatthew D Fleming 	 * 1/vm_memguard_divisor of system memory.  If the KVA is
173e3813573SMatthew D Fleming 	 * smaller than this then the KVA limit comes into play first.
174e3813573SMatthew D Fleming 	 * This prevents memguard's page promotions from completely
175e3813573SMatthew D Fleming 	 * using up memory, since most malloc(9) calls are sub-page.
176e4eb384bSBosko Milekic 	 */
177e3813573SMatthew D Fleming 	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
178e3813573SMatthew D Fleming 	/*
179e3813573SMatthew D Fleming 	 * We want as much KVA as we can take safely.  Use at most our
180e3813573SMatthew D Fleming 	 * allotted fraction of kmem_max.  Limit this to twice the
181e3813573SMatthew D Fleming 	 * physical memory to avoid using too much memory as pagetable
182e3813573SMatthew D Fleming 	 * pages.
183e3813573SMatthew D Fleming 	 */
184e3813573SMatthew D Fleming 	memguard_mapsize = km_max / vm_memguard_divisor;
185e3813573SMatthew D Fleming 	/* size must be multiple of PAGE_SIZE */
186e3813573SMatthew D Fleming 	memguard_mapsize = round_page(memguard_mapsize);
187e3813573SMatthew D Fleming 	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
188e3813573SMatthew D Fleming 		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
189e3813573SMatthew D Fleming 	if (km_size + memguard_mapsize > km_max)
190e3813573SMatthew D Fleming 		return (km_max);
191e3813573SMatthew D Fleming 	return (km_size + memguard_mapsize);
192e3813573SMatthew D Fleming }
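
/*
 * Worked example (illustrative numbers): on a machine with 4GB of RAM
 * (about one million 4KB pages) and the default divisor of 10,
 * memguard_physlimit = (mem_pgs / 10) * PAGE_SIZE, roughly 400MB of
 * backing pages, and memguard_mapsize = km_max / 10 of KVA, further
 * clamped so it never exceeds twice the physical memory.  The caller's
 * kmem request then grows by memguard_mapsize, capped at km_max.
 */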
193e4eb384bSBosko Milekic 
194e4eb384bSBosko Milekic /*
195e4eb384bSBosko Milekic  * Initialize the MemGuard mock allocator.  All objects from MemGuard come
196e4eb384bSBosko Milekic  * out of a single VM map (contiguous chunk of address space).
197e4eb384bSBosko Milekic  */
198e4eb384bSBosko Milekic void
199e3813573SMatthew D Fleming memguard_init(vm_map_t parent_map)
200e4eb384bSBosko Milekic {
201e3813573SMatthew D Fleming 	vm_offset_t base, limit;
202e4eb384bSBosko Milekic 
203e3813573SMatthew D Fleming 	memguard_map = kmem_suballoc(parent_map, &base, &limit,
204e3813573SMatthew D Fleming 	    memguard_mapsize, FALSE);
205e4eb384bSBosko Milekic 	memguard_map->system_map = 1;
206e3813573SMatthew D Fleming 	KASSERT(memguard_mapsize == limit - base,
207e3813573SMatthew D Fleming 	    ("Expected %lu, got %lu", (u_long)memguard_mapsize,
208e3813573SMatthew D Fleming 	     (u_long)(limit - base)));
209e3813573SMatthew D Fleming 	memguard_cursor = base;
210e4eb384bSBosko Milekic 
211e4eb384bSBosko Milekic 	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
212e3813573SMatthew D Fleming 	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
213e3813573SMatthew D Fleming 	printf("\tMEMGUARD map limit: 0x%lx\n", (u_long)limit);
214e3813573SMatthew D Fleming 	printf("\tMEMGUARD map size: %jd KBytes\n",
215e3813573SMatthew D Fleming 	    (uintmax_t)memguard_mapsize >> 10);
216e4eb384bSBosko Milekic }
217e4eb384bSBosko Milekic 
218e4eb384bSBosko Milekic /*
219e3813573SMatthew D Fleming  * Run things that can't be done as early as memguard_init().
220e3813573SMatthew D Fleming  */
221e3813573SMatthew D Fleming static void
222e3813573SMatthew D Fleming memguard_sysinit(void)
223e3813573SMatthew D Fleming {
224e3813573SMatthew D Fleming 	struct sysctl_oid_list *parent;
225e3813573SMatthew D Fleming 
226e3813573SMatthew D Fleming 	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);
227e3813573SMatthew D Fleming 
228e3813573SMatthew D Fleming 	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
229e3813573SMatthew D Fleming 	    &memguard_map->min_offset, "MemGuard KVA base");
230e3813573SMatthew D Fleming 	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
231e3813573SMatthew D Fleming 	    &memguard_map->max_offset, "MemGuard KVA end");
232e3813573SMatthew D Fleming 	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
233e3813573SMatthew D Fleming 	    &memguard_map->size, "MemGuard KVA used");
234e3813573SMatthew D Fleming }
235e3813573SMatthew D Fleming SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);
236e3813573SMatthew D Fleming 
237e3813573SMatthew D Fleming /*
238e3813573SMatthew D Fleming  * v2sizep() converts a virtual address of the first page allocated for
239e3813573SMatthew D Fleming  * an item to a pointer to u_long recording the size of the original
240e3813573SMatthew D Fleming  * allocation request.
241e3813573SMatthew D Fleming  *
242e3813573SMatthew D Fleming  * This routine is very similar to those defined by UMA in uma_int.h.
243e3813573SMatthew D Fleming  * The difference is that this routine stores the originally allocated
244e3813573SMatthew D Fleming  * size in one of the page's fields that is unused when the page is
245e3813573SMatthew D Fleming  * wired, rather than in the object field, which is in use.
246e3813573SMatthew D Fleming  */
247e3813573SMatthew D Fleming static u_long *
248e3813573SMatthew D Fleming v2sizep(vm_offset_t va)
249e3813573SMatthew D Fleming {
250*a2a200a2SMatthew D Fleming 	vm_paddr_t pa;
251e3813573SMatthew D Fleming 	struct vm_page *p;
252e3813573SMatthew D Fleming 
253*a2a200a2SMatthew D Fleming 	pa = pmap_kextract(va);
254*a2a200a2SMatthew D Fleming 	if (pa == 0)
255*a2a200a2SMatthew D Fleming 		panic("MemGuard detected double-free of %p", (void *)va);
256*a2a200a2SMatthew D Fleming 	p = PHYS_TO_VM_PAGE(pa);
257e3813573SMatthew D Fleming 	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
258e3813573SMatthew D Fleming 	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
259e3813573SMatthew D Fleming 	return ((u_long *)&p->pageq.tqe_next);
260e3813573SMatthew D Fleming }
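
/*
 * The size is stored once by memguard_alloc() and read back by
 * memguard_free() and memguard_realloc().  Because kmem_back() maps the
 * pages at allocation time and vm_map_delete() unmaps them on free, a
 * second free finds no physical page behind the address and the
 * pmap_kextract() check above turns that into a double-free panic.
 */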
261e3813573SMatthew D Fleming 
262e3813573SMatthew D Fleming /*
263e3813573SMatthew D Fleming  * Allocate a single object of specified size with specified flags
264e3813573SMatthew D Fleming  * (either M_WAITOK or M_NOWAIT).
265e4eb384bSBosko Milekic  */
266e4eb384bSBosko Milekic void *
267e3813573SMatthew D Fleming memguard_alloc(unsigned long req_size, int flags)
268e4eb384bSBosko Milekic {
269e3813573SMatthew D Fleming 	vm_offset_t addr;
270e3813573SMatthew D Fleming 	u_long size_p, size_v;
271e3813573SMatthew D Fleming 	int do_guard, rv;
272e4eb384bSBosko Milekic 
273e3813573SMatthew D Fleming 	size_p = round_page(req_size);
274e3813573SMatthew D Fleming 	if (size_p == 0)
275e3813573SMatthew D Fleming 		return (NULL);
276e4eb384bSBosko Milekic 	/*
277e3813573SMatthew D Fleming 	 * To ensure there are holes on both sides of the allocation,
278e3813573SMatthew D Fleming 	 * request 2 extra pages of KVA.  We will only actually add a
279e3813573SMatthew D Fleming 	 * vm_map_entry and get pages for the original request.  Save
280e3813573SMatthew D Fleming 	 * the value of memguard_options so we have a consistent
281e3813573SMatthew D Fleming 	 * value.
282e4eb384bSBosko Milekic 	 */
283e3813573SMatthew D Fleming 	size_v = size_p;
284e3813573SMatthew D Fleming 	do_guard = (memguard_options & MG_GUARD) != 0;
285e3813573SMatthew D Fleming 	if (do_guard)
286e3813573SMatthew D Fleming 		size_v += 2 * PAGE_SIZE;
287e4eb384bSBosko Milekic 
288e3813573SMatthew D Fleming 	vm_map_lock(memguard_map);
289e3813573SMatthew D Fleming 	/*
290e3813573SMatthew D Fleming 	 * When we pass our memory limit, reject sub-page allocations.
291e3813573SMatthew D Fleming 	 * Page-size and larger allocations will use the same amount
292e3813573SMatthew D Fleming 	 * of physical memory whether we allocate or hand off to
293e3813573SMatthew D Fleming 	 * uma_large_alloc(), so keep those.
294e3813573SMatthew D Fleming 	 */
295e3813573SMatthew D Fleming 	if (memguard_map->size >= memguard_physlimit &&
296e3813573SMatthew D Fleming 	    req_size < PAGE_SIZE) {
297e3813573SMatthew D Fleming 		addr = (vm_offset_t)NULL;
298e3813573SMatthew D Fleming 		memguard_fail_pgs++;
299e3813573SMatthew D Fleming 		goto out;
300e4eb384bSBosko Milekic 	}
301e3813573SMatthew D Fleming 	/*
302e3813573SMatthew D Fleming 	 * Keep a moving cursor so we don't recycle KVA as long as
303e3813573SMatthew D Fleming 	 * possible.  It's not perfect, since we don't know in what
304e3813573SMatthew D Fleming 	 * order previous allocations will be free'd, but it's simple
305e3813573SMatthew D Fleming 	 * and fast, and requires O(1) additional storage if guard
306e3813573SMatthew D Fleming 	 * pages are not used.
307e3813573SMatthew D Fleming 	 *
308e3813573SMatthew D Fleming 	 * XXX This scheme will lead to greater fragmentation of the
309e3813573SMatthew D Fleming 	 * map, unless vm_map_findspace() is tweaked.
310e3813573SMatthew D Fleming 	 */
311e3813573SMatthew D Fleming 	for (;;) {
312e3813573SMatthew D Fleming 		rv = vm_map_findspace(memguard_map, memguard_cursor,
313e3813573SMatthew D Fleming 		    size_v, &addr);
314e3813573SMatthew D Fleming 		if (rv == KERN_SUCCESS)
315e3813573SMatthew D Fleming 			break;
316e3813573SMatthew D Fleming 		/*
317e3813573SMatthew D Fleming 		 * The map has no space.  This may be due to
318e3813573SMatthew D Fleming 		 * fragmentation, or because the cursor is near the
319e3813573SMatthew D Fleming 		 * end of the map.
320e3813573SMatthew D Fleming 		 */
321e3813573SMatthew D Fleming 		if (memguard_cursor == vm_map_min(memguard_map)) {
322e3813573SMatthew D Fleming 			memguard_fail_kva++;
323e3813573SMatthew D Fleming 			addr = (vm_offset_t)NULL;
324e3813573SMatthew D Fleming 			goto out;
325e3813573SMatthew D Fleming 		}
326e3813573SMatthew D Fleming 		memguard_wrap++;
327e3813573SMatthew D Fleming 		memguard_cursor = vm_map_min(memguard_map);
328e3813573SMatthew D Fleming 	}
329e3813573SMatthew D Fleming 	if (do_guard)
330e3813573SMatthew D Fleming 		addr += PAGE_SIZE;
331e3813573SMatthew D Fleming 	rv = kmem_back(memguard_map, addr, size_p, flags);
332e3813573SMatthew D Fleming 	if (rv != KERN_SUCCESS) {
333e3813573SMatthew D Fleming 		memguard_fail_pgs++;
334e3813573SMatthew D Fleming 		addr = (vm_offset_t)NULL;
335e3813573SMatthew D Fleming 		goto out;
336e3813573SMatthew D Fleming 	}
337e3813573SMatthew D Fleming 	memguard_cursor = addr + size_p;
338e3813573SMatthew D Fleming 	*v2sizep(trunc_page(addr)) = req_size;
339e3813573SMatthew D Fleming 	memguard_succ++;
340e3813573SMatthew D Fleming 	if (req_size < PAGE_SIZE) {
341e3813573SMatthew D Fleming 		memguard_wasted += (PAGE_SIZE - req_size);
342e3813573SMatthew D Fleming 		if (do_guard) {
343e3813573SMatthew D Fleming 			/*
344e3813573SMatthew D Fleming 			 * Align the request to 16 bytes, and return
345e3813573SMatthew D Fleming 			 * an address near the end of the page, to
346e3813573SMatthew D Fleming 			 * better detect array overrun.
347e3813573SMatthew D Fleming 			 */
348e3813573SMatthew D Fleming 			req_size = roundup2(req_size, 16);
349e3813573SMatthew D Fleming 			addr += (PAGE_SIZE - req_size);
350e3813573SMatthew D Fleming 		}
351e3813573SMatthew D Fleming 	}
352e3813573SMatthew D Fleming out:
353e3813573SMatthew D Fleming 	vm_map_unlock(memguard_map);
354e3813573SMatthew D Fleming 	return ((void *)addr);
355e3813573SMatthew D Fleming }
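
/*
 * Resulting layout for a guarded, sub-page request (a sketch; assumes
 * MG_GUARD is set):
 *
 *	|  guard page  |   data page(s)   |  guard page  |
 *	 (unmapped KVA)  ^            ^     (unmapped KVA)
 *	                 |            +-- returned pointer, 16-byte aligned
 *	                 |                and pushed toward the end of the
 *	                 |                page so overruns hit the guard
 *	                 +-- trunc_page() of the returned pointer, where
 *	                     v2sizep() keeps the original request size
 *
 * Only the data pages are backed by kmem_back(); the guard pages are
 * never mapped, so both underruns and overruns fault immediately.
 */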
356e3813573SMatthew D Fleming 
357e3813573SMatthew D Fleming int
358e3813573SMatthew D Fleming is_memguard_addr(void *addr)
359e3813573SMatthew D Fleming {
360e3813573SMatthew D Fleming 	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;
361e3813573SMatthew D Fleming 
362e3813573SMatthew D Fleming 	return (a >= memguard_map->min_offset && a < memguard_map->max_offset);
363e4eb384bSBosko Milekic }
364e4eb384bSBosko Milekic 
365e4eb384bSBosko Milekic /*
366e4eb384bSBosko Milekic  * Free specified single object.
367e4eb384bSBosko Milekic  */
368e4eb384bSBosko Milekic void
369e3813573SMatthew D Fleming memguard_free(void *ptr)
370e4eb384bSBosko Milekic {
371e3813573SMatthew D Fleming 	vm_offset_t addr;
372e3813573SMatthew D Fleming 	u_long req_size, size;
373e3813573SMatthew D Fleming 	char *temp;
374e3813573SMatthew D Fleming 	int i;
375e4eb384bSBosko Milekic 
376e3813573SMatthew D Fleming 	addr = trunc_page((uintptr_t)ptr);
377e3813573SMatthew D Fleming 	req_size = *v2sizep(addr);
378e3813573SMatthew D Fleming 	size = round_page(req_size);
37903412565SBosko Milekic 
38003412565SBosko Milekic 	/*
381e3813573SMatthew D Fleming 	 * Page should not be guarded right now, so force a write.
382e3813573SMatthew D Fleming 	 * The purpose of this is to increase the likelihood of
383e3813573SMatthew D Fleming 	 * catching a double-free, but not necessarily a
384e3813573SMatthew D Fleming 	 * tamper-after-free (the second thread freeing might not
385e3813573SMatthew D Fleming 	 * write before freeing, so this forces a write that will
386e3813573SMatthew D Fleming 	 * then trigger a fault).
38703412565SBosko Milekic 	 */
388e3813573SMatthew D Fleming 	temp = ptr;
389e3813573SMatthew D Fleming 	for (i = 0; i < size; i += PAGE_SIZE)
390e3813573SMatthew D Fleming 		temp[i] = 'M';
39103412565SBosko Milekic 
392e3813573SMatthew D Fleming 	/*
393e3813573SMatthew D Fleming 	 * This requires carnal knowledge of the implementation of
394e3813573SMatthew D Fleming 	 * kmem_free(), but since we've already replaced kmem_malloc()
395e3813573SMatthew D Fleming 	 * above, it's not really any worse.  We want to use the
396e3813573SMatthew D Fleming 	 * vm_map lock to serialize updates to memguard_wasted, since
397e3813573SMatthew D Fleming 	 * we had the lock at increment.
398e3813573SMatthew D Fleming 	 */
399e3813573SMatthew D Fleming 	vm_map_lock(memguard_map);
400e3813573SMatthew D Fleming 	if (req_size < PAGE_SIZE)
401e3813573SMatthew D Fleming 		memguard_wasted -= (PAGE_SIZE - req_size);
402e3813573SMatthew D Fleming 	(void)vm_map_delete(memguard_map, addr, addr + size);
403e3813573SMatthew D Fleming 	vm_map_unlock(memguard_map);
404e4eb384bSBosko Milekic }
405e4eb384bSBosko Milekic 
4066d3ed393SMatthew D Fleming /*
4076d3ed393SMatthew D Fleming  * Re-allocate an allocation that was originally guarded.
4086d3ed393SMatthew D Fleming  */
4096d3ed393SMatthew D Fleming void *
4106d3ed393SMatthew D Fleming memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
4116d3ed393SMatthew D Fleming     int flags)
4126d3ed393SMatthew D Fleming {
4136d3ed393SMatthew D Fleming 	void *newaddr;
4146d3ed393SMatthew D Fleming 	u_long old_size;
4156d3ed393SMatthew D Fleming 
4166d3ed393SMatthew D Fleming 	/*
4176d3ed393SMatthew D Fleming 	 * Allocate the new block.  Force the allocation to be guarded
4186d3ed393SMatthew D Fleming 	 * as the original may have been guarded through random
4196d3ed393SMatthew D Fleming 	 * chance, and that should be preserved.
4206d3ed393SMatthew D Fleming 	 */
4216d3ed393SMatthew D Fleming 	if ((newaddr = memguard_alloc(size, flags)) == NULL)
4226d3ed393SMatthew D Fleming 		return (NULL);
4236d3ed393SMatthew D Fleming 
4246d3ed393SMatthew D Fleming 	/* Copy over original contents. */
4256d3ed393SMatthew D Fleming 	old_size = *v2sizep(trunc_page((uintptr_t)addr));
4266d3ed393SMatthew D Fleming 	bcopy(addr, newaddr, min(size, old_size));
4276d3ed393SMatthew D Fleming 	memguard_free(addr);
4286d3ed393SMatthew D Fleming 	return (newaddr);
4296d3ed393SMatthew D Fleming }
4306d3ed393SMatthew D Fleming 
431d362c40dSPawel Jakub Dawidek int
432e3813573SMatthew D Fleming memguard_cmp(struct malloc_type *mtp, unsigned long size)
433d362c40dSPawel Jakub Dawidek {
434d362c40dSPawel Jakub Dawidek 
435e3813573SMatthew D Fleming 	if (size < memguard_minsize) {
436e3813573SMatthew D Fleming 		memguard_minsize_reject++;
437e3813573SMatthew D Fleming 		return (0);
438e3813573SMatthew D Fleming 	}
439e3813573SMatthew D Fleming 	if ((memguard_options & MG_ALLLARGE) != 0 && size >= PAGE_SIZE)
440e3813573SMatthew D Fleming 		return (1);
441e3813573SMatthew D Fleming 	if (memguard_frequency > 0 &&
442e3813573SMatthew D Fleming 	    (random() % 100000) < memguard_frequency) {
443e3813573SMatthew D Fleming 		memguard_frequency_hits++;
444e3813573SMatthew D Fleming 		return (1);
445e3813573SMatthew D Fleming 	}
446d362c40dSPawel Jakub Dawidek #if 1
447d362c40dSPawel Jakub Dawidek 	/*
448d362c40dSPawel Jakub Dawidek 	 * The safest way of comparison is to always compare the short
449d362c40dSPawel Jakub Dawidek 	 * description string of the memory type, but it is also the slowest.
450d362c40dSPawel Jakub Dawidek 	 */
451d362c40dSPawel Jakub Dawidek 	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
452d362c40dSPawel Jakub Dawidek #else
453d362c40dSPawel Jakub Dawidek 	/*
454d362c40dSPawel Jakub Dawidek 	 * If we compare pointers, there are two possible problems:
455d362c40dSPawel Jakub Dawidek 	 * 1. Memory type was unloaded and new memory type was allocated at the
456d362c40dSPawel Jakub Dawidek 	 *    same address.
457d362c40dSPawel Jakub Dawidek 	 * 2. Memory type was unloaded and loaded again, but allocated at a
458d362c40dSPawel Jakub Dawidek 	 *    different address.
459d362c40dSPawel Jakub Dawidek 	 */
460d362c40dSPawel Jakub Dawidek 	if (vm_memguard_mtype != NULL)
461d362c40dSPawel Jakub Dawidek 		return (mtp == vm_memguard_mtype);
462d362c40dSPawel Jakub Dawidek 	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
463d362c40dSPawel Jakub Dawidek 		vm_memguard_mtype = mtp;
464d362c40dSPawel Jakub Dawidek 		return (1);
465d362c40dSPawel Jakub Dawidek 	}
466d362c40dSPawel Jakub Dawidek 	return (0);
467d362c40dSPawel Jakub Dawidek #endif
468d362c40dSPawel Jakub Dawidek }
469
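
#if 0
/*
 * Illustrative sketch only -- not part of this file.  It shows how a
 * malloc(9)-style wrapper could consult the hooks above; the function
 * names example_malloc() and example_free() and the fallback paths are
 * hypothetical, and the real integration lives in the malloc(9)
 * implementation.
 */
static void *
example_malloc(unsigned long size, struct malloc_type *mtp, int flags)
{

	if (memguard_cmp(mtp, size))
		return (memguard_alloc(size, flags));
	/* ... otherwise fall through to the regular allocator ... */
	return (NULL);
}

static void
example_free(void *addr)
{

	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
	/* ... otherwise hand the pointer back to the regular allocator ... */
}
#endif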