/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005, Bosko Milekic <bmilekic@FreeBSD.org>.
 * Copyright (c) 2010 Isilon Systems, Inc. (http://www.isilon.com/)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MemGuard is a simple replacement allocator for debugging only
 * which provides ElectricFence-style memory barrier protection on
 * objects being allocated, and is used to detect tampering-after-free
 * scenarios.
 *
 * See the memguard(9) man page for more information on using MemGuard.
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>
#include <vm/memguard.h>

static SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
/*
 * The vm_memguard_divisor variable controls how much of kernel_arena should be
 * reserved for MemGuard.
 */
static u_int vm_memguard_divisor;
SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");
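/*
 * Illustrative note (a sketch based on memguard(9), not part of the
 * original source): the divisor is consumed before the kernel maps are
 * sized, so it is normally set as a boot-time tunable from loader.conf(5),
 * e.g. (values here are only examples):
 *
 *	vm.memguard.divisor="10"
 *	vm.memguard.options="1"
 */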
/*
 * Short description (ks_shortdesc) of memory type to monitor.
 */
static char vm_memguard_desc[128] = "";
static struct malloc_type *vm_memguard_mtype = NULL;
TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
static int
memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
{
	char desc[sizeof(vm_memguard_desc)];
	int error;

	strlcpy(desc, vm_memguard_desc, sizeof(desc));
	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&malloc_mtx);
	/* If mtp is NULL, it will be initialized in memguard_cmp() */
	vm_memguard_mtype = malloc_desc2type(desc);
	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
	mtx_unlock(&malloc_mtx);
	return (error);
}
SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    memguard_sysctl_desc, "A", "Short description of memory type to monitor");

static vm_offset_t memguard_cursor;
static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
static u_long memguard_wrap;
static u_long memguard_succ;
static u_long memguard_fail_kva;
static u_long memguard_fail_pgs;

SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
    &memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
    &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
    &memguard_wasted, 0, "Excess memory used through page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
    &memguard_wrap, 0, "MemGuard cursor wrap count");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
    &memguard_succ, 0, "Count of successful MemGuard allocations");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");

#define	MG_GUARD_AROUND		0x001
#define	MG_GUARD_ALLLARGE	0x002
#define	MG_GUARD_NOFREE		0x004
static int memguard_options = MG_GUARD_AROUND;
SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RWTUN,
    &memguard_options, 0,
    "MemGuard options:\n"
    "\t0x001 - add guard pages around each allocation\n"
    "\t0x002 - always use MemGuard for allocations over a page\n"
    "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag");

static u_int memguard_minsize;
static u_long memguard_minsize_reject;
SYSCTL_UINT(_vm_memguard, OID_AUTO, minsize, CTLFLAG_RW,
    &memguard_minsize, 0, "Minimum size for page promotion");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, minsize_reject, CTLFLAG_RD,
    &memguard_minsize_reject, 0, "# times rejected for size");

static u_int memguard_frequency;
static u_long memguard_frequency_hits;
SYSCTL_UINT(_vm_memguard, OID_AUTO, frequency, CTLFLAG_RWTUN,
    &memguard_frequency, 0, "Times in 100000 that MemGuard will randomly run");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
    &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
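/*
 * Usage sketch (illustrative, not from the original source): a malloc(9)
 * type is selected for guarding at runtime by writing its short
 * description to the sysctl above.  Assuming the stock M_TEMP type, whose
 * ks_shortdesc is "temp":
 *
 *	sysctl vm.memguard.desc=temp
 *
 * Only one description is stored at a time, so writing a new value
 * replaces the previous selection.
 */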
/*
 * Return a fudged value to be used for vm_kmem_size for allocating
 * the kernel_arena.  The memguard memory will be a submap.
 */
unsigned long
memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
{
	u_long mem_pgs, parent_size;

	vm_memguard_divisor = 10;
	/* CTLFLAG_RDTUN doesn't work during the early boot process. */
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
	    PAGE_SIZE;
	/* Pick a conservative value if the provided value is unusable. */
	if ((vm_memguard_divisor <= 0) ||
	    ((parent_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	/*
	 * Limit consumption of physical pages to
	 * 1/vm_memguard_divisor of system memory.  If the KVA is
	 * smaller than this then the KVA limit comes into play first.
	 * This prevents memguard's page promotions from completely
	 * using up memory, since most malloc(9) calls are sub-page.
	 */
	mem_pgs = vm_cnt.v_page_count;
	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
	/*
	 * We want as much KVA as we can take safely.  Use at most our
	 * allotted fraction of the parent map's size.  Limit this to
	 * twice the physical memory to avoid using too much memory as
	 * pagetable pages (the size must be a multiple of PAGE_SIZE).
	 */
	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + memguard_mapsize > parent_size)
		memguard_mapsize = 0;
	return (km_size + memguard_mapsize);
}

/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space).
 */
void
memguard_init(vmem_t *parent)
{
	vm_offset_t base;

	vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
	vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
	    PAGE_SIZE, 0, M_WAITOK);
	memguard_cursor = base;
	memguard_base = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	printf("\tMEMGUARD map size: %ju KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}

/*
 * Run things that can't be done as early as memguard_init().
 */
static void
memguard_sysinit(void)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);

	SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
	    &memguard_base, "MemGuard KVA base");
	SYSCTL_ADD_UAUTO(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
	    &memguard_mapsize, "MemGuard KVA size");
#if 0
	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
	    &memguard_map->size, "MemGuard KVA used");
#endif
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);
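/*
 * Caller-side sketch (an assumption about the boot path in kern_malloc.c,
 * shown for context rather than copied verbatim): the kernel arena is
 * grown by the fudged amount before it is created, and MemGuard is handed
 * its piece once the arena exists, roughly along these lines:
 */
#if 0
	/* In kmeminit(), while sizing the kernel arena: */
	vm_kmem_size = memguard_fudge(vm_kmem_size, kernel_map);
	/* ... */
	/* After the kernel arena is initialized: */
	memguard_init(kernel_arena);
#endif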
/*
 * v2sizep() converts a virtual address of the first page allocated for
 * an item to a pointer to u_long recording the size of the original
 * allocation request.
 *
 * This routine is very similar to those defined by UMA in uma_int.h.
 * The difference is that this routine stores the originally allocated
 * size in one of the page's fields that is unused when the page is
 * wired, rather than in the object field, which is in use.
 */
static u_long *
v2sizep(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.p);
}

static u_long *
v2sizev(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.v);
}
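/*
 * For reference, a paraphrase of where these two u_long slots live (the
 * definition in vm/vm_page.h is authoritative; other union members are
 * omitted here): the "plinks" union in struct vm_page is free for reuse
 * while the page is wired and on no page queue, which is exactly the
 * state asserted above.
 */
#if 0
	/* In struct vm_page: */
	union {
		TAILQ_ENTRY(vm_page) q;	/* page queue linkage */
		/* ... */
		struct {
			u_long p;	/* MemGuard: requested size */
			u_long v;	/* MemGuard: reserved KVA size */
		} memguard;
	} plinks;
#endif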
/*
 * Allocate a single object of the specified size with the specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr, origaddr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  We will only actually add a
	 * vm_map_entry and get pages for the original request.  Save
	 * the value of memguard_options so we have a consistent
	 * value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;

	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so that KVA is not recycled for as long
	 * as possible.  It's not perfect, since we don't know in what
	 * order previous allocations will be freed, but it's simple
	 * and fast, and requires O(1) additional storage if guard
	 * pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * map, unless vm_map_findspace() is tweaked.
	 */
	for (;;) {
		if (vmem_xalloc(memguard_arena, size_v, 0, 0, 0,
		    memguard_cursor, VMEM_ADDR_MAX,
		    M_BESTFIT | M_NOWAIT, &origaddr) == 0)
			break;
		/*
		 * The map has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the map.
		 */
		if (memguard_cursor == memguard_base) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = memguard_base;
	}
	addr = origaddr;
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(kernel_object, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		vmem_xfree(memguard_arena, origaddr, size_v);
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = addr + size_v;
	*v2sizep(trunc_page(addr)) = req_size;
	*v2sizev(trunc_page(addr)) = size_v;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	return ((void *)addr);
}

int
is_memguard_addr(void *addr)
{
	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;

	return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}

/*
 * Free the specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size, sizev;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	sizev = *v2sizev(addr);
	size = round_page(req_size);

	/*
	 * Page should not be guarded right now, so force a write.
	 * The purpose of this is to increase the likelihood of
	 * catching a double-free, but not necessarily a
	 * tamper-after-free (the second thread freeing might not
	 * write before freeing, so this forces it to write and,
	 * subsequently, trigger a fault).
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_free(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	kmem_unback(kernel_object, addr, size);
	if (sizev > size)
		addr -= PAGE_SIZE;
	vmem_xfree(memguard_arena, addr, sizev);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
}

/*
 * Re-allocate an allocation that was originally guarded.
 */
void *
memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
    int flags)
{
	void *newaddr;
	u_long old_size;

	/*
	 * Allocate the new block.  Force the allocation to be guarded
	 * as the original may have been guarded through random
	 * chance, and that should be preserved.
	 */
	if ((newaddr = memguard_alloc(size, flags)) == NULL)
		return (NULL);

	/* Copy over the original contents. */
	old_size = *v2sizep(trunc_page((uintptr_t)addr));
	bcopy(addr, newaddr, min(size, old_size));
	memguard_free(addr);
	return (newaddr);
}

static int
memguard_cmp(unsigned long size)
{

	if (size < memguard_minsize) {
		memguard_minsize_reject++;
		return (0);
	}
	if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE)
		return (1);
	if (memguard_frequency > 0 &&
	    (random() % 100000) < memguard_frequency) {
		memguard_frequency_hits++;
		return (1);
	}

	return (0);
}
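/*
 * Caller-side sketch (an assumption about kern_malloc.c, shown here for
 * context rather than taken verbatim from it): malloc(9) consults the
 * predicates below and diverts a matching request into MemGuard, falling
 * back to the regular allocator if MemGuard cannot satisfy it:
 */
#if 0
	/* In malloc(), under options DEBUG_MEMGUARD: */
	if (memguard_cmp_mtp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* MemGuard is out of KVA or pages; fall through. */
	}
#endif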
int
memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size)
{

	if (memguard_cmp(size))
		return (1);

#if 1
	/*
	 * The safest way of comparison is to always compare the short
	 * description string of the memory type, but it is also the
	 * slowest way.
	 */
	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
#else
	/*
	 * If we compare pointers, there are two possible problems:
	 * 1. Memory type was unloaded and a new memory type was allocated
	 *    at the same address.
	 * 2. Memory type was unloaded and loaded again, but allocated at a
	 *    different address.
	 */
	if (vm_memguard_mtype != NULL)
		return (mtp == vm_memguard_mtype);
	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
		vm_memguard_mtype = mtp;
		return (1);
	}
	return (0);
#endif
}

int
memguard_cmp_zone(uma_zone_t zone)
{

	if ((memguard_options & MG_GUARD_NOFREE) == 0 &&
	    zone->uz_flags & UMA_ZONE_NOFREE)
		return (0);

	if (memguard_cmp(zone->uz_size))
		return (1);

	/*
	 * The safest way of comparison is to always compare the zone name,
	 * but it is also the slowest way.
	 */
	return (strcmp(zone->uz_name, vm_memguard_desc) == 0);
}
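/*
 * Build-time note and a caller-side sketch.  The kernel option comes from
 * memguard(9); the uma_zalloc_arg() excerpt is an assumption about
 * uma_core.c, not copied from it.  MemGuard is compiled in with:
 *
 *	options DEBUG_MEMGUARD
 *
 * and UMA is expected to divert guarded zones in much the same way as
 * malloc(9) diverts guarded malloc types:
 */
#if 0
	/* In uma_zalloc_arg(), under options DEBUG_MEMGUARD: */
	if (memguard_cmp_zone(zone)) {
		item = memguard_alloc(zone->uz_size, flags);
		if (item != NULL) {
			if (zone->uz_init != NULL &&
			    zone->uz_init(item, zone->uz_size, flags) != 0)
				return (NULL);
			return (item);
		}
		/* Fall through to the regular UMA path. */
	}
#endif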