/*-
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator for debugging only
 * which provides ElectricFence-style memory barrier protection on
 * objects being allocated, and is used to detect tampering-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */
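
/*
 * Example usage (values are illustrative; see memguard(9) for details):
 *
 *	options DEBUG_MEMGUARD		# in the kernel configuration
 *
 *	vm.memguard.desc="ithread"	# loader.conf: guard this malloc type
 *	sysctl vm.memguard.desc=ithread	# or change the guarded type at runtime
 */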

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/memguard.h>

SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/*
	 * If vm_memguard_mtype is NULL, it will be initialized in
	 * memguard_cmp().
	 */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");

static vm_map_t memguard_map = NULL;
static vm_offset_t memguard_cursor;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private vm_map size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define MG_GUARD	0x001
#define MG_ALLLARGE	0x002
static int memguard_options = MG_GUARD;
TUNABLE_INT("vm.memguard.options", &memguard_options);
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page");
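/*
 * For example, setting vm.memguard.options=3 enables both guard pages
 * (MG_GUARD) and unconditional guarding of multi-page allocations
 * (MG_ALLLARGE).
 */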

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
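/*
 * For example, vm.memguard.frequency=1000 guards roughly 1% (1000 in
 * 100000) of eligible allocations at random.
 */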

/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kmem_map.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, unsigned long km_max)
{
	u_long mem_pgs = cnt.v_page_count;

	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	/* Pick a conservative value if the provided value sucks. */
	if ((vm_memguard_divisor <= 0) ||
	    ((km_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of kmem_max.  Limit this to twice the
	 * physical memory to avoid using too much memory as pagetable
	 * pages.
	 */
	memguard_mapsize = km_max / vm_memguard_divisor;
	/* size must be multiple of PAGE_SIZE */
	memguard_mapsize = round_page(memguard_mapsize);
	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + memguard_mapsize > km_max)
		return (km_max);
	return (km_size + memguard_mapsize);
}
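
/*
 * Worked example (illustrative numbers): with 1 GB of RAM (mem_pgs =
 * 262144 4 KB pages) and the default divisor of 10, memguard_physlimit
 * is 26214 pages, i.e. ~102 MB of physical memory.  With km_max = 400 MB,
 * memguard_mapsize is ~40 MB of KVA, well under the 2x-physical-memory
 * clamp, so the function returns km_size + 40 MB, capped at km_max.
 */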

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vm_map_t parent_map)
{
	vm_offset_t base, limit;

	memguard_map = kmem_suballoc(parent_map, &base, &limit,
	    memguard_mapsize, FALSE);
	memguard_map->system_map = 1;
	KASSERT(memguard_mapsize == limit - base,
	    ("Expected %lu, got %lu", (u_long)memguard_mapsize,
	     (u_long)(limit - base)));
	memguard_cursor = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map limit: 0x%lx\n", (u_long)limit);
	printf("\tMEMGUARD map size: %jd KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}
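
/*
 * memguard_init() is expected to run once at boot against the map the
 * MemGuard submap should be carved from; a sketch of the call site (the
 * exact location varies by kernel version):
 *
 *	#ifdef DEBUG_MEMGUARD
 *	memguard_init(kmem_map);
 *	#endif
 */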

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
	    &memguard_map->min_offset, "MemGuard KVA base");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
	    &memguard_map->max_offset, "MemGuard KVA end");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
	    &memguard_map->size, "MemGuard KVA used");
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts the virtual address of the first page allocated for
 * an item into a pointer to a u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in a page field that is unused while the page is wired, rather
 * than in the object field, which is in use.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	struct vm_page *p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return ((u_long *)&p->pageq.tqe_next);
}
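
/*
 * The size slot returned by v2sizep() is written once in memguard_alloc()
 * and read back in memguard_free().
 */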

/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  We will only actually add a
	 * vm_map_entry and get pages for the original request.  Save
	 * the value of memguard_options so we have a consistent
	 * value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;
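	/*
	 * With guard pages enabled the reservation looks like this;
	 * only the middle size_p bytes are ever backed by pages:
	 *
	 *	+------------+---------------------+------------+
	 *	| guard page | size_p backed bytes | guard page |
	 *	| (unmapped) |                     | (unmapped) |
	 *	+------------+---------------------+------------+
	 */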

	vm_map_lock(memguard_map);
	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (memguard_map->size >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so we don't recycle KVA for as long as
	 * possible.  It's not perfect, since we don't know in what
	 * order previous allocations will be freed, but it's simple
	 * and fast, and requires O(1) additional storage if guard
	 * pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * map, unless vm_map_findspace() is tweaked.
	 */
	for (;;) {
		rv = vm_map_findspace(memguard_map, memguard_cursor,
		    size_v, &addr);
		if (rv == KERN_SUCCESS)
			break;
		/*
		 * The map has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the map.
		 */
		if (memguard_cursor == vm_map_min(memguard_map)) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = vm_map_min(memguard_map);
	}
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(memguard_map, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = addr + size_p;
	*v2sizep(trunc_page(addr)) = req_size;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	vm_map_unlock(memguard_map);
	return ((void *)addr);
}

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_map->min_offset && a < memguard_map->max_offset);
}
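
/*
 * A typical caller tests this before handing a pointer back to the
 * regular allocator; a sketch of the free(9) side:
 *
 *	if (is_memguard_addr(addr)) {
 *		memguard_free(addr);
 *		return;
 *	}
 */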

/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	size = round_page(req_size);

	/*
	 * The page should not be guarded at this point, so force a
	 * write to it.  This increases the likelihood of catching a
	 * double free: the second freeing thread might not otherwise
	 * write to the page before freeing it, so forcing the write
	 * here makes a double free trigger a fault.  (It does not
	 * necessarily catch a tamper-after-free.)
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_free(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	vm_map_lock(memguard_map);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
	(void)vm_map_delete(memguard_map, addr, addr + size);
	vm_map_unlock(memguard_map);
}

int
memguard_cmp(struct malloc_type *mtp, unsigned long size)
{

	if (size < memguard_minsize) {
		memguard_minsize_reject++;
		return (0);
	}
	if ((memguard_options & MG_ALLLARGE) != 0 && size >= PAGE_SIZE)
		return (1);
	if (memguard_frequency > 0 &&
	    (random() % 100000) < memguard_frequency) {
		memguard_frequency_hits++;
		return (1);
	}
#if 1
	/*
	 * The safest way to compare is to always compare the short
	 * description string of the memory type, but it is also the
	 * slowest way.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. Memory type was unloaded and new memory type was allocated at the
	 *    same address.
	 * 2. Memory type was unloaded and loaded again, but allocated at a
	 *    different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}
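
/*
 * malloc(9) consults memguard_cmp() to decide whether a request should be
 * diverted to MemGuard; a rough sketch of the allocation side (see
 * kern_malloc.c; details vary by version):
 *
 *	#ifdef DEBUG_MEMGUARD
 *	if (memguard_cmp(mtp, size)) {
 *		va = memguard_alloc(size, flags);
 *		if (va != NULL)
 *			return (va);
 *	}
 *	#endif
 */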