/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_ddb.h"

/*
 * Routines for describing and initializing anything related to physical
 * memory.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/physmem.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>
#include <machine/md_var.h>

/*
 * These structures are used internally to keep track of regions of physical
 * ram, and regions within the physical ram that need to be excluded.  An
 * exclusion region can be excluded from crash dumps, from the vm pool of pages
 * that can be allocated, or both, depending on the exclusion flags associated
 * with the region.
 */
#ifdef DEV_ACPI
#define	MAX_HWCNT	32	/* ACPI needs more regions */
#define	MAX_EXCNT	32
#else
#define	MAX_HWCNT	16
#define	MAX_EXCNT	16
#endif

#if defined(__arm__)
#define	MAX_PHYS_ADDR	0xFFFFFFFFull
#elif defined(__aarch64__) || defined(__riscv)
#define	MAX_PHYS_ADDR	0xFFFFFFFFFFFFFFFFull
#endif

struct region {
	vm_paddr_t	addr;
	vm_size_t	size;
	uint32_t	flags;
};

static struct region hwregions[MAX_HWCNT];
static struct region exregions[MAX_EXCNT];

static size_t hwcnt;
static size_t excnt;

/*
 * realmem is the total number of hardware pages, excluded or not.
 * Maxmem is one greater than the last physical page number.
 */
long realmem;
long Maxmem;
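/*
 * Illustrative example (hypothetical addresses, not part of the original
 * source): a board with a single 1GB bank of RAM at 0x80000000 whose top
 * 16MB is reserved as a firmware framebuffer would end up with one entry in
 * each table, roughly:
 *
 *	hwregions[0] = { .addr = 0x80000000, .size = 0x40000000, .flags = 0 };
 *	exregions[0] = { .addr = 0xBF000000, .size = 0x01000000,
 *	    .flags = EXFLAG_NOALLOC | EXFLAG_NODUMP };
 *
 * Entries are normally created through physmem_hardware_region() and
 * physmem_exclude_region() below; the EXFLAG_* flags come from
 * <sys/physmem.h>.
 */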
/*
 * Print the contents of the physical and excluded region tables using the
 * provided printf-like output function (which will be either printf or
 * db_printf).
 */
static void
physmem_dump_tables(int (*prfunc)(const char *, ...))
{
	int flags, i;
	uintmax_t addr, size;
	const unsigned int mbyte = 1024 * 1024;

	prfunc("Physical memory chunk(s):\n");
	for (i = 0; i < hwcnt; ++i) {
		addr = hwregions[i].addr;
		size = hwregions[i].size;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages)\n", addr,
		    addr + size - 1, size / mbyte, size / PAGE_SIZE);
	}

	prfunc("Excluded memory regions:\n");
	for (i = 0; i < excnt; ++i) {
		addr = exregions[i].addr;
		size = exregions[i].size;
		flags = exregions[i].flags;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages) %s %s\n",
		    addr, addr + size - 1, size / mbyte, size / PAGE_SIZE,
		    (flags & EXFLAG_NOALLOC) ? "NoAlloc" : "",
		    (flags & EXFLAG_NODUMP) ? "NoDump" : "");
	}

#ifdef DEBUG
	prfunc("Avail lists:\n");
	for (i = 0; phys_avail[i] != 0; ++i) {
		prfunc("  phys_avail[%d] 0x%08x\n", i, phys_avail[i]);
	}
	for (i = 0; dump_avail[i] != 0; ++i) {
		prfunc("  dump_avail[%d] 0x%08x\n", i, dump_avail[i]);
	}
#endif
}

/*
 * Print the contents of the physical and excluded region tables.  Used for
 * bootverbose.
 */
void
physmem_print_tables(void)
{

	physmem_dump_tables(printf);
}
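/*
 * With the illustrative tables sketched earlier, the bootverbose output
 * would look roughly like the following (hypothetical values, page counts
 * assuming PAGE_SIZE == 4096):
 *
 *	Physical memory chunk(s):
 *	  0x80000000 - 0xbfffffff,  1024 MB ( 262144 pages)
 *	Excluded memory regions:
 *	  0xbf000000 - 0xbfffffff,    16 MB (   4096 pages) NoAlloc NoDump
 */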
/*
 * Walk the list of hardware regions, processing it against the list of
 * exclusions that contain the given exflags, and generating an "avail list".
 *
 * If maxphyssz is not zero it sets an upper limit, in bytes, on the total
 * "avail list" size.  The walk stops once the limit is reached, and the last
 * region is cut short if necessary.
 *
 * Updates the value at *pavail with the number of pages of non-excluded
 * memory added to the avail list, and the value at *prealmem with the total
 * page count of all hardware regions, excluded or not.
 *
 * Returns the number of entries written to the avail list, which is always
 * an even number of start/end address pairs.
 */
static size_t
regions_to_avail(vm_paddr_t *avail, uint32_t exflags, size_t maxavail,
    uint64_t maxphyssz, long *pavail, long *prealmem)
{
	size_t acnt, exi, hwi;
	uint64_t end, start, xend, xstart;
	long availmem, totalmem;
	const struct region *exp, *hwp;
	uint64_t availsz;

	totalmem = 0;
	availmem = 0;
	availsz = 0;
	acnt = 0;
	for (hwi = 0, hwp = hwregions; hwi < hwcnt; ++hwi, ++hwp) {
		start = hwp->addr;
		end = hwp->size + start;
		totalmem += atop((vm_offset_t)(end - start));
		for (exi = 0, exp = exregions; exi < excnt; ++exi, ++exp) {
			/*
			 * If the excluded region does not match given flags,
			 * continue checking with the next excluded region.
			 */
			if ((exp->flags & exflags) == 0)
				continue;
			xstart = exp->addr;
			xend = exp->size + xstart;
			/*
			 * If the excluded region ends before this hw region,
			 * continue checking with the next excluded region.
			 */
			if (xend <= start)
				continue;
			/*
			 * If the excluded region begins after this hw region
			 * we're done because both lists are sorted.
			 */
			if (xstart >= end)
				break;
			/*
			 * If the excluded region completely covers this hw
			 * region, shrink this hw region to zero size.
			 */
			if ((start >= xstart) && (end <= xend)) {
				start = xend;
				end = xend;
				break;
			}
			/*
			 * If the excluded region falls wholly within this hw
			 * region without abutting or overlapping the beginning
			 * or end, create an available entry from the leading
			 * fragment, then adjust the start of this hw region to
			 * the end of the excluded region, and continue checking
			 * the next excluded region because another exclusion
			 * could affect the remainder of this hw region.
			 */
			if ((xstart > start) && (xend < end)) {
				if ((maxphyssz != 0) &&
				    (availsz + xstart - start > maxphyssz)) {
					xstart = maxphyssz + start - availsz;
				}
				if (xstart <= start)
					continue;
				if (acnt > 0 &&
				    avail[acnt - 1] == (vm_paddr_t)start) {
					avail[acnt - 1] = (vm_paddr_t)xstart;
				} else {
					avail[acnt++] = (vm_paddr_t)start;
					avail[acnt++] = (vm_paddr_t)xstart;
				}
				availsz += (xstart - start);
				availmem += atop((vm_offset_t)(xstart - start));
				start = xend;
				continue;
			}
			/*
			 * We know the excluded region overlaps either the start
			 * or end of this hardware region (but not both), trim
			 * the excluded portion off the appropriate end.
			 */
			if (xstart <= start)
				start = xend;
			else
				end = xstart;
		}
		/*
		 * If the trimming actions above left a non-zero size, create an
		 * available entry for it.
		 */
		if (end > start) {
			if ((maxphyssz != 0) &&
			    (availsz + end - start > maxphyssz)) {
				end = maxphyssz + start - availsz;
			}
			if (end <= start)
				break;

			if (acnt > 0 && avail[acnt - 1] == (vm_paddr_t)start) {
				avail[acnt - 1] = (vm_paddr_t)end;
			} else {
				avail[acnt++] = (vm_paddr_t)start;
				avail[acnt++] = (vm_paddr_t)end;
			}
			availsz += end - start;
			availmem += atop((vm_offset_t)(end - start));
		}
		if (acnt >= maxavail)
			panic("Not enough space in the dump/phys_avail arrays");
	}

	if (pavail != NULL)
		*pavail = availmem;
	if (prealmem != NULL)
		*prealmem = totalmem;
	return (acnt);
}
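/*
 * Worked example (hypothetical values, not from the original source): given
 * a single hardware region [0x80000000, 0xC0000000) and one matching 1MB
 * exclusion [0x90000000, 0x90100000), regions_to_avail() emits two start/end
 * pairs and returns 4:
 *
 *	avail[0] = 0x80000000;	avail[1] = 0x90000000;
 *	avail[2] = 0x90100000;	avail[3] = 0xC0000000;
 *
 * That is, the avail array is a list of [start, end) pairs covering the
 * non-excluded memory, in ascending address order.
 */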
/*
 * Check if the region at idx can be merged with the region above it.
 */
static size_t
merge_upper_regions(struct region *regions, size_t rcnt, size_t idx)
{
	struct region *lower, *upper;
	vm_paddr_t lend, uend;
	size_t i, mergecnt, movecnt;

	lower = &regions[idx];
	lend = lower->addr + lower->size;

	/*
	 * Continue merging in upper entries as long as we have entries to
	 * merge; the new block could have spanned more than one, although one
	 * is likely the common case.
	 */
	for (i = idx + 1; i < rcnt; i++) {
		upper = &regions[i];
		if (lend < upper->addr || lower->flags != upper->flags)
			break;

		uend = upper->addr + upper->size;
		if (uend > lend) {
			lower->size += uend - lend;
			lend = lower->addr + lower->size;
		}

		if (uend >= lend) {
			/*
			 * If we didn't move past the end of the upper region,
			 * then we don't need to bother checking for another
			 * merge because it would have been done already.  Just
			 * increment i once more to maintain the invariant that
			 * i is one past the last entry merged.
			 */
			i++;
			break;
		}
	}

	/*
	 * We merged in the entries from [idx + 1, i); physically move the tail
	 * end at [i, rcnt) if we need to.
	 */
	mergecnt = i - (idx + 1);
	if (mergecnt > 0) {
		movecnt = rcnt - i;
		if (movecnt == 0) {
			/* Merged all the way to the end, just decrease rcnt. */
			rcnt = idx + 1;
		} else {
			memmove(&regions[idx + 1], &regions[idx + mergecnt + 1],
			    movecnt * sizeof(*regions));
			rcnt -= mergecnt;
		}
	}
	return (rcnt);
}

/*
 * Insertion-sort a new entry into a regions list; sorted by start address.
 */
static size_t
insert_region(struct region *regions, size_t rcnt, vm_paddr_t addr,
    vm_size_t size, uint32_t flags)
{
	size_t i;
	vm_paddr_t nend, rend;
	struct region *ep, *rp;

	nend = addr + size;
	ep = regions + rcnt;
	for (i = 0, rp = regions; i < rcnt; ++i, ++rp) {
		if (flags == rp->flags) {
			rend = rp->addr + rp->size;
			if (addr <= rp->addr && nend >= rp->addr) {
				/*
				 * New mapping overlaps at the beginning, shift
				 * for any difference in the beginning then
				 * shift if the new mapping extends past.
				 */
				rp->size += rp->addr - addr;
				rp->addr = addr;
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			} else if (addr <= rend && nend > rp->addr) {
				/*
				 * New mapping is either entirely contained
				 * within or it's overlapping at the end.
				 */
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			}
		}
		if (addr < rp->addr) {
			bcopy(rp, rp + 1, (ep - rp) * sizeof(*rp));
			break;
		}
	}
	rp->addr = addr;
	rp->size = size;
	rp->flags = flags;
	rcnt++;

	return (rcnt);
}
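/*
 * Worked example (hypothetical values, not from the original source): with a
 * sorted list containing [0x1000, 0x2000) and [0x3000, 0x4000), inserting
 * [0x1800, 0x3800) with the same flags first grows the lower entry, then
 * merge_upper_regions() folds the upper entry in, leaving a single region
 * [0x1000, 0x4000) and rcnt == 1.  Regions whose flags differ are never
 * coalesced.
 */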
/*
 * Add a hardware memory region.
 */
void
physmem_hardware_region(uint64_t pa, uint64_t sz)
{
	vm_offset_t adj;

	/*
	 * Filter out the page at PA 0x00000000.  The VM can't handle it, as
	 * pmap_extract() == 0 means failure.
	 */
	if (pa == 0) {
		if (sz <= PAGE_SIZE)
			return;
		pa = PAGE_SIZE;
		sz -= PAGE_SIZE;
	} else if (pa > MAX_PHYS_ADDR) {
		/* This range is past usable memory, ignore it */
		return;
	}

	/*
	 * Also filter out the page at the end of the physical address space --
	 * if addr is non-zero and addr+size is zero we wrapped to the next byte
	 * beyond what vm_paddr_t can express.  That leads to a NULL pointer
	 * deref early in startup; work around it by leaving the last page out.
	 *
	 * XXX This just in:  subtract out a whole megabyte, not just 1 page.
	 * Reducing the size by anything less than 1MB results in the NULL
	 * pointer deref in _vm_map_lock_read().  Better to give up a megabyte
	 * than leave some folks with an unusable system while we investigate.
	 */
	if ((pa + sz) > (MAX_PHYS_ADDR - 1024 * 1024)) {
		sz = MAX_PHYS_ADDR - pa + 1;
		if (sz <= 1024 * 1024)
			return;
		sz -= 1024 * 1024;
	}

	/*
	 * Round the starting address up to a page boundary, and truncate the
	 * ending page down to a page boundary.
	 */
	adj = round_page(pa) - pa;
	pa = round_page(pa);
	sz = trunc_page(sz - adj);

	if (sz > 0 && hwcnt < nitems(hwregions))
		hwcnt = insert_region(hwregions, hwcnt, pa, sz, 0);
}

/*
 * Add an exclusion region.
 */
void
physmem_exclude_region(vm_paddr_t pa, vm_size_t sz, uint32_t exflags)
{
	vm_offset_t adj;

	/*
	 * Truncate the starting address down to a page boundary, and round the
	 * ending page up to a page boundary.
	 */
	adj = pa - trunc_page(pa);
	pa = trunc_page(pa);
	sz = round_page(sz + adj);

	if (excnt >= nitems(exregions))
		panic("failed to exclude region %#jx-%#jx", (uintmax_t)pa,
		    (uintmax_t)(pa + sz));
	excnt = insert_region(exregions, excnt, pa, sz, exflags);
}

size_t
physmem_avail(vm_paddr_t *avail, size_t maxavail)
{

	return (regions_to_avail(avail, EXFLAG_NOALLOC, maxavail, 0, NULL,
	    NULL));
}

/*
 * Process all the regions added earlier into the global avail lists.
 *
 * Updates the kernel global 'physmem' with the number of physical pages
 * available for use (all pages not in any exclusion region).
 *
 * Updates the kernel global 'Maxmem' with the page number one greater than the
 * last page of physical memory in the system.
 */
void
physmem_init_kernel_globals(void)
{
	size_t nextidx;
	u_long hwphyssz;

	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", &hwphyssz);

	regions_to_avail(dump_avail, EXFLAG_NODUMP, PHYS_AVAIL_ENTRIES,
	    hwphyssz, NULL, NULL);
	nextidx = regions_to_avail(phys_avail, EXFLAG_NOALLOC,
	    PHYS_AVAIL_ENTRIES, hwphyssz, &physmem, &realmem);
	if (nextidx == 0)
		panic("No memory entries in phys_avail");
	Maxmem = atop(phys_avail[nextidx - 1]);
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(physmem, db_show_physmem)
{

	physmem_dump_tables(db_printf);
}

#endif /* DDB */
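/*
 * Usage sketch (illustrative only, not part of the original file): a
 * platform's early initialization typically feeds this module before the VM
 * is bootstrapped.  The addresses and the kernstart/kernlen symbols below are
 * hypothetical; real values come from FDT/ACPI tables and the linker.
 *
 *	physmem_hardware_region(0x80000000, 0x40000000);
 *	physmem_exclude_region(kernstart, kernlen, EXFLAG_NOALLOC);
 *	physmem_init_kernel_globals();
 *
 * After the last call, phys_avail[] and dump_avail[] are populated and the
 * physmem, realmem, and Maxmem globals are ready for vm_page startup.
 */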