/*-
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator for debugging only
 * which provides ElectricFence-style memory barrier protection on
 * objects being allocated, and is used to detect tampering-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/memguard.h>

SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kmem_map should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");

/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
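
/*
 * Usage sketch (not part of the original source): the type to monitor
 * can be chosen at boot through the tunable above, or changed at
 * runtime through the sysctl handler below.  Assuming a malloc type
 * registered with the short description "BPF" is present:
 *
 *	vm.memguard.desc="BPF"		(in loader.conf)
 *	sysctl vm.memguard.desc=BPF	(on a running system)
 *
 * The string is matched against each type's ks_shortdesc in
 * memguard_cmp() below.
 */
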
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/*
	 * If mtp is NULL, it will be initialized in memguard_cmp().
	 */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");

static vm_map_t memguard_map = NULL;
static vm_offset_t memguard_cursor;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private vm_map size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define	MG_GUARD	0x001
#define	MG_ALLLARGE	0x002
static int memguard_options = MG_GUARD;
TUNABLE_INT("vm.memguard.options", &memguard_options);
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page");

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
TUNABLE_INT("vm.memguard.frequency", &memguard_frequency);
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RW,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
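
/*
 * Illustrative tuning sketch (an assumption, not taken from this
 * file): to guard every allocation of a page or more, and to also
 * divert roughly 1% of eligible allocations at random, one might set:
 *
 *	sysctl vm.memguard.options=3		(MG_GUARD | MG_ALLLARGE)
 *	sysctl vm.memguard.frequency=1000	(1000 out of 100000 == 1%)
 */
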
/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kmem_map.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, unsigned long km_max)
{
	u_long mem_pgs = cnt.v_page_count;

	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	/* Pick a conservative value if the provided value sucks. */
	if ((vm_memguard_divisor <= 0) ||
	    ((km_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of kmem_max.  Limit this to twice the
	 * physical memory to avoid using too much memory as pagetable
	 * pages.
	 */
	memguard_mapsize = km_max / vm_memguard_divisor;
	/* size must be multiple of PAGE_SIZE */
	memguard_mapsize = round_page(memguard_mapsize);
	if (memguard_mapsize == 0 ||
	    memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_max > 0 && km_size + memguard_mapsize > km_max)
		return (km_max);
	return (km_size + memguard_mapsize);
}

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vm_map_t parent_map)
{
	vm_offset_t base, limit;

	memguard_map = kmem_suballoc(parent_map, &base, &limit,
	    memguard_mapsize, FALSE);
	memguard_map->system_map = 1;
	KASSERT(memguard_mapsize == limit - base,
	    ("Expected %lu, got %lu", (u_long)memguard_mapsize,
	     (u_long)(limit - base)));
	memguard_cursor = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map limit: 0x%lx\n", (u_long)limit);
	printf("\tMEMGUARD map size: %jd KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
	    &memguard_map->min_offset, "MemGuard KVA base");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
	    &memguard_map->max_offset, "MemGuard KVA end");
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
	    &memguard_map->size, "MemGuard KVA used");
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);

/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired rather than the object field, which is used.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return ((u_long *)&p->pageq.tqe_next);
}
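
/*
 * KVA layout sketch (derived from memguard_alloc() below, not in the
 * original file): when MG_GUARD is set, a request that rounds up to
 * size_p bytes reserves size_p + 2 * PAGE_SIZE of address space, but
 * only the middle size_p bytes get a vm_map_entry and backing pages,
 * so a touch of either unbacked side faults immediately:
 *
 *	| guard page | object (size_p bytes) | guard page |
 *	  unmapped     ^ returned address      unmapped
 */
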
/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  We will only actually add a
	 * vm_map_entry and get pages for the original request.  Save
	 * the value of memguard_options so we have a consistent
	 * value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;

	vm_map_lock(memguard_map);
	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (memguard_map->size >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so we avoid recycling KVA for as long
	 * as possible.  It's not perfect, since we don't know in what
	 * order previous allocations will be freed, but it's simple
	 * and fast, and requires O(1) additional storage if guard
	 * pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * map, unless vm_map_findspace() is tweaked.
	 */
	for (;;) {
		rv = vm_map_findspace(memguard_map, memguard_cursor,
		    size_v, &addr);
		if (rv == KERN_SUCCESS)
			break;
		/*
		 * The map has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the map.
		 */
		if (memguard_cursor == vm_map_min(memguard_map)) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = vm_map_min(memguard_map);
	}
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(memguard_map, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = addr + size_p;
	*v2sizep(trunc_page(addr)) = req_size;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	vm_map_unlock(memguard_map);
	return ((void *)addr);
}

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_map->min_offset && a < memguard_map->max_offset);
}
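
/*
 * A minimal sketch, assuming a consumer such as free(9), of how this
 * predicate is meant to route a pointer back to MemGuard; the actual
 * call site lives outside this file:
 *
 *	if (is_memguard_addr(addr)) {
 *		memguard_free(addr);
 *		return;
 *	}
 */
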
388 */ 389 temp = ptr; 390 for (i = 0; i < size; i += PAGE_SIZE) 391 temp[i] = 'M'; 392 393 /* 394 * This requires carnal knowledge of the implementation of 395 * kmem_free(), but since we've already replaced kmem_malloc() 396 * above, it's not really any worse. We want to use the 397 * vm_map lock to serialize updates to memguard_wasted, since 398 * we had the lock at increment. 399 */ 400 vm_map_lock(memguard_map); 401 if (req_size < PAGE_SIZE) 402 memguard_wasted -= (PAGE_SIZE - req_size); 403 (void)vm_map_delete(memguard_map, addr, addr + size); 404 vm_map_unlock(memguard_map); 405 } 406 407 /* 408 * Re-allocate an allocation that was originally guarded. 409 */ 410 void * 411 memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp, 412 int flags) 413 { 414 void *newaddr; 415 u_long old_size; 416 417 /* 418 * Allocate the new block. Force the allocation to be guarded 419 * as the original may have been guarded through random 420 * chance, and that should be preserved. 421 */ 422 if ((newaddr = memguard_alloc(size, flags)) == NULL) 423 return (NULL); 424 425 /* Copy over original contents. */ 426 old_size = *v2sizep(trunc_page((uintptr_t)addr)); 427 bcopy(addr, newaddr, min(size, old_size)); 428 memguard_free(addr); 429 return (newaddr); 430 } 431 432 int 433 memguard_cmp(struct malloc_type *mtp, unsigned long size) 434 { 435 436 if (size < memguard_minsize) { 437 memguard_minsize_reject++; 438 return (0); 439 } 440 if ((memguard_options & MG_ALLLARGE) != 0 && size >= PAGE_SIZE) 441 return (1); 442 if (memguard_frequency > 0 && 443 (random() % 100000) < memguard_frequency) { 444 memguard_frequency_hits++; 445 return (1); 446 } 447 #if 1 448 /* 449 * The safest way of comparsion is to always compare short description 450 * string of memory type, but it is also the slowest way. 451 */ 452 return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0); 453 #else 454 /* 455 * If we compare pointers, there are two possible problems: 456 * 1. Memory type was unloaded and new memory type was allocated at the 457 * same address. 458 * 2. Memory type was unloaded and loaded again, but allocated at a 459 * different address. 460 */ 461 if (vm_memguard_mtype != NULL) 462 return (mtp == vm_memguard_mtype); 463 if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) { 464 vm_memguard_mtype = mtp; 465 return (1); 466 } 467 return (0); 468 #endif 469 } 470