/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Hudson River Trading LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/aclocal.h>
#include <contrib/dev/acpica/include/actables.h>

#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <x86/apicvar.h>

#include <dev/acpica/acpivar.h>

#if MAXMEMDOM > 1
static struct cpu_info {
        int enabled:1;
        int has_memory:1;
        int domain;
} *cpus;

struct mem_affinity mem_info[VM_PHYSSEG_MAX + 1];
int num_mem;

static ACPI_TABLE_SRAT *srat;
static vm_paddr_t srat_physaddr;

static int domain_pxm[MAXMEMDOM];
static int ndomain;

static ACPI_TABLE_SLIT *slit;
static vm_paddr_t slit_physaddr;
static int vm_locality_table[MAXMEMDOM * MAXMEMDOM];

static void srat_walk_table(acpi_subtable_handler *handler, void *arg);

/*
 * SLIT parsing.
 */

static void
slit_parse_table(ACPI_TABLE_SLIT *s)
{
        int i, j;
        int i_domain, j_domain;
        int offset = 0;
        uint8_t e;

        /*
         * Map the SLIT data into the VM-domain-centric view.  The PXM
         * namespace may be sparse, so remap each PXM to its VM domain ID
         * and skip any entry whose PXM has no corresponding VM domain.
         *
         * The result is a packed 2-D array of VM-domain locality
         * information entries.
         */

        if (bootverbose)
                printf("SLIT.Localities: %d\n", (int) s->LocalityCount);
        for (i = 0; i < s->LocalityCount; i++) {
                i_domain = acpi_map_pxm_to_vm_domainid(i);
                if (i_domain < 0)
                        continue;

                if (bootverbose)
                        printf("%d: ", i);
                for (j = 0; j < s->LocalityCount; j++) {
                        j_domain = acpi_map_pxm_to_vm_domainid(j);
                        if (j_domain < 0)
                                continue;
                        e = s->Entry[i * s->LocalityCount + j];
                        if (bootverbose)
                                printf("%d ", (int) e);
                        /* 255 == "no locality information" */
                        if (e == 255)
                                vm_locality_table[offset] = -1;
                        else
                                vm_locality_table[offset] = e;
                        offset++;
                }
                if (bootverbose)
                        printf("\n");
        }
}
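
/*
 * Worked example (illustrative only, not from a real table): with PXMs
 * 0 and 3 mapped to VM domains 0 and 1 and PXMs 1 and 2 unmapped, a
 * 4x4 SLIT such as
 *
 *      10 32 32 21
 *      32 10 21 32
 *      32 21 10 32
 *      21 32 32 10
 *
 * collapses to the packed 2x2 table { 10, 21, 21, 10 }: rows and
 * columns for unmapped PXMs are dropped and the surviving entries are
 * stored consecutively in vm_locality_table[].
 */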

/*
 * Look for an ACPI System Locality Distance Information Table ("SLIT").
 */
static int
parse_slit(void)
{

        if (resource_disabled("slit", 0)) {
                return (-1);
        }

        slit_physaddr = acpi_find_table(ACPI_SIG_SLIT);
        if (slit_physaddr == 0) {
                return (-1);
        }

        /*
         * Make a single pass over the table to populate
         * vm_locality_table[].
         */
        slit = acpi_map_table(slit_physaddr, ACPI_SIG_SLIT);
        slit_parse_table(slit);
        acpi_unmap_table(slit);
        slit = NULL;

#ifdef NUMA
        /* Tell the VM about it! */
        mem_locality = vm_locality_table;
#endif
        return (0);
}
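
/*
 * Note on how the packed table is laid out: vm_locality_table[] is
 * filled row-major by VM domain ID, so once mem_locality is set the
 * distance from domain i to domain j is stored at index
 * (i * vm_ndomains + j).  The -1 entries mark pairs for which the SLIT
 * provided no locality information.
 */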

/*
 * SRAT parsing.
 */

/*
 * Returns true if a memory range overlaps with at least one range in
 * phys_avail[].
 */
static int
overlaps_phys_avail(vm_paddr_t start, vm_paddr_t end)
{
        int i;

        for (i = 0; phys_avail[i] != 0 && phys_avail[i + 1] != 0; i += 2) {
                if (phys_avail[i + 1] <= start)
                        continue;
                if (phys_avail[i] < end)
                        return (1);
                break;
        }
        return (0);
}

static void
srat_parse_entry(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
        ACPI_SRAT_CPU_AFFINITY *cpu;
        ACPI_SRAT_X2APIC_CPU_AFFINITY *x2apic;
        ACPI_SRAT_MEM_AFFINITY *mem;
        int domain, i, slot;

        switch (entry->Type) {
        case ACPI_SRAT_TYPE_CPU_AFFINITY:
                cpu = (ACPI_SRAT_CPU_AFFINITY *)entry;
                domain = cpu->ProximityDomainLo |
                    cpu->ProximityDomainHi[0] << 8 |
                    cpu->ProximityDomainHi[1] << 16 |
                    cpu->ProximityDomainHi[2] << 24;
                if (bootverbose)
                        printf("SRAT: Found CPU APIC ID %u domain %d: %s\n",
                            cpu->ApicId, domain,
                            (cpu->Flags & ACPI_SRAT_CPU_ENABLED) ?
                            "enabled" : "disabled");
                if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED))
                        break;
                if (cpu->ApicId > max_apic_id) {
                        printf("SRAT: Ignoring local APIC ID %u (too high)\n",
                            cpu->ApicId);
                        break;
                }

                if (cpus[cpu->ApicId].enabled) {
                        printf("SRAT: Duplicate local APIC ID %u\n",
                            cpu->ApicId);
                        *(int *)arg = ENXIO;
                        break;
                }
                cpus[cpu->ApicId].domain = domain;
                cpus[cpu->ApicId].enabled = 1;
                break;
        case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
                x2apic = (ACPI_SRAT_X2APIC_CPU_AFFINITY *)entry;
                if (bootverbose)
                        printf("SRAT: Found CPU APIC ID %u domain %d: %s\n",
                            x2apic->ApicId, x2apic->ProximityDomain,
                            (x2apic->Flags & ACPI_SRAT_CPU_ENABLED) ?
                            "enabled" : "disabled");
                if (!(x2apic->Flags & ACPI_SRAT_CPU_ENABLED))
                        break;
                if (x2apic->ApicId > max_apic_id) {
                        printf("SRAT: Ignoring local APIC ID %u (too high)\n",
                            x2apic->ApicId);
                        break;
                }

                KASSERT(!cpus[x2apic->ApicId].enabled,
                    ("Duplicate local APIC ID %u", x2apic->ApicId));
                cpus[x2apic->ApicId].domain = x2apic->ProximityDomain;
                cpus[x2apic->ApicId].enabled = 1;
                break;
        case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
                mem = (ACPI_SRAT_MEM_AFFINITY *)entry;
                if (bootverbose)
                        printf(
                    "SRAT: Found memory domain %d addr 0x%jx len 0x%jx: %s\n",
                            mem->ProximityDomain, (uintmax_t)mem->BaseAddress,
                            (uintmax_t)mem->Length,
                            (mem->Flags & ACPI_SRAT_MEM_ENABLED) ?
                            "enabled" : "disabled");
                if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED))
                        break;
                if (mem->BaseAddress >= cpu_getmaxphyaddr() ||
                    !overlaps_phys_avail(mem->BaseAddress,
                    mem->BaseAddress + mem->Length)) {
                        printf("SRAT: Ignoring memory at addr 0x%jx\n",
                            (uintmax_t)mem->BaseAddress);
                        break;
                }
                if (num_mem == VM_PHYSSEG_MAX) {
                        printf("SRAT: Too many memory regions\n");
                        *(int *)arg = ENXIO;
                        break;
                }
                slot = num_mem;
                for (i = 0; i < num_mem; i++) {
                        if (mem_info[i].end <= mem->BaseAddress)
                                continue;
                        if (mem_info[i].start <
                            (mem->BaseAddress + mem->Length)) {
                                printf("SRAT: Overlapping memory entries\n");
                                *(int *)arg = ENXIO;
                                return;
                        }
                        slot = i;
                }
                for (i = num_mem; i > slot; i--)
                        mem_info[i] = mem_info[i - 1];
                mem_info[slot].start = mem->BaseAddress;
                mem_info[slot].end = mem->BaseAddress + mem->Length;
                mem_info[slot].domain = mem->ProximityDomain;
                num_mem++;
                break;
        }
}

/*
 * Ensure each memory domain has at least one CPU and that each CPU
 * has at least one memory domain.
 */
static int
check_domains(void)
{
        int found, i, j;

        for (i = 0; i < num_mem; i++) {
                found = 0;
                for (j = 0; j <= max_apic_id; j++)
                        if (cpus[j].enabled &&
                            cpus[j].domain == mem_info[i].domain) {
                                cpus[j].has_memory = 1;
                                found++;
                        }
                if (!found) {
                        printf("SRAT: No CPU found for memory domain %d\n",
                            mem_info[i].domain);
                        return (ENXIO);
                }
        }
        for (i = 0; i <= max_apic_id; i++)
                if (cpus[i].enabled && !cpus[i].has_memory) {
                        printf("SRAT: No memory found for CPU %d\n", i);
                        return (ENXIO);
                }
        return (0);
}

/*
 * Check that the SRAT memory regions cover all of the regions in
 * phys_avail[].
 */
static int
check_phys_avail(void)
{
        vm_paddr_t address;
        int i, j;

        /* j is the current offset into phys_avail[]. */
        address = phys_avail[0];
        j = 0;
        for (i = 0; i < num_mem; i++) {
                /*
                 * Consume as many phys_avail[] entries as fit in this
                 * region.
                 */
                while (address >= mem_info[i].start &&
                    address <= mem_info[i].end) {
                        /*
                         * If we cover the rest of this phys_avail[] entry,
                         * advance to the next entry.
                         */
                        if (phys_avail[j + 1] <= mem_info[i].end) {
                                j += 2;
                                if (phys_avail[j] == 0 &&
                                    phys_avail[j + 1] == 0) {
                                        return (0);
                                }
                                address = phys_avail[j];
                        } else
                                address = mem_info[i].end + 1;
                }
        }
        printf("SRAT: No memory region found for 0x%jx - 0x%jx\n",
            (uintmax_t)phys_avail[j], (uintmax_t)phys_avail[j + 1]);
        return (ENXIO);
}

/*
 * Renumber the memory domains to be compact and zero-based if not
 * already.  Returns an error if there are too many domains.
 */
static int
renumber_domains(void)
{
        int i, j, slot;

        /* Enumerate all the domains. */
        ndomain = 0;
        for (i = 0; i < num_mem; i++) {
                /* See if this domain is already known. */
                for (j = 0; j < ndomain; j++) {
                        if (domain_pxm[j] >= mem_info[i].domain)
                                break;
                }
                if (j < ndomain && domain_pxm[j] == mem_info[i].domain)
                        continue;

                if (ndomain >= MAXMEMDOM) {
                        ndomain = 1;
                        printf("SRAT: Too many memory domains\n");
                        return (EFBIG);
                }

                /* Insert the new domain at slot 'j'. */
                slot = j;
                for (j = ndomain; j > slot; j--)
                        domain_pxm[j] = domain_pxm[j - 1];
                domain_pxm[slot] = mem_info[i].domain;
                ndomain++;
        }

        /* Renumber each domain to its index in the sorted 'domain_pxm' list. */
        for (i = 0; i < ndomain; i++) {
                /*
                 * If the domain is already the right value, no need
                 * to renumber.
                 */
                if (domain_pxm[i] == i)
                        continue;

                /* Walk the cpu[] and mem_info[] arrays to renumber. */
                for (j = 0; j < num_mem; j++)
                        if (mem_info[j].domain == domain_pxm[i])
                                mem_info[j].domain = i;
                for (j = 0; j <= max_apic_id; j++)
                        if (cpus[j].enabled && cpus[j].domain == domain_pxm[i])
                                cpus[j].domain = i;
        }

        return (0);
}
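
/*
 * Worked example (illustrative only): if the SRAT describes memory in
 * proximity domains 1, 5 and 8, renumber_domains() records
 * domain_pxm[] = { 1, 5, 8 } and rewrites the domain fields in
 * mem_info[] and cpus[] to the compact VM domain IDs 0, 1 and 2
 * respectively.
 */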

/*
 * Look for an ACPI System Resource Affinity Table ("SRAT").
 */
static int
parse_srat(void)
{
        unsigned int idx, size;
        vm_paddr_t addr;
        int error;

        if (resource_disabled("srat", 0))
                return (-1);

        srat_physaddr = acpi_find_table(ACPI_SIG_SRAT);
        if (srat_physaddr == 0)
                return (-1);

        /*
         * Allocate data structure:
         *
         * Find the last physical memory region and steal some memory from
         * it.  This is done because at this point in the boot process
         * malloc is still not usable.
         */
        for (idx = 0; phys_avail[idx + 1] != 0; idx += 2);
        KASSERT(idx != 0, ("phys_avail is empty!"));
        idx -= 2;

        size = sizeof(*cpus) * (max_apic_id + 1);
        addr = trunc_page(phys_avail[idx + 1] - size);
        KASSERT(addr >= phys_avail[idx],
            ("Not enough memory for SRAT table items"));
        phys_avail[idx + 1] = addr - 1;

        /*
         * We cannot rely on PHYS_TO_DMAP because this code is also used on
         * i386, so use pmap_mapbios() to map the memory; it will end up
         * using the default memory attribute (WB), and the DMAP when
         * available.
         */
        cpus = (struct cpu_info *)pmap_mapbios(addr, size);
        bzero(cpus, size);

        /*
         * Make a pass over the table to populate the cpus[] and
         * mem_info[] tables.
         */
        srat = acpi_map_table(srat_physaddr, ACPI_SIG_SRAT);
        error = 0;
        srat_walk_table(srat_parse_entry, &error);
        acpi_unmap_table(srat);
        srat = NULL;
        if (error || check_domains() != 0 || check_phys_avail() != 0 ||
            renumber_domains() != 0) {
                srat_physaddr = 0;
                return (-1);
        }

#ifdef NUMA
        /* Point vm_phys at our memory affinity table. */
        vm_ndomains = ndomain;
        mem_affinity = mem_info;
#endif

        return (0);
}
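
/*
 * Bootstrap allocation note (figures below are hypothetical): parse_srat()
 * steals room for cpus[], sizeof(struct cpu_info) * (max_apic_id + 1)
 * bytes, from the end of the last phys_avail[] region, rounding the start
 * of the stolen range down to a page boundary.  For example, with a last
 * region ending at 0x23fffffff and a 0x1800-byte cpus[] array,
 *
 *      addr = trunc_page(0x23fffffff - 0x1800) = 0x23fffe000
 *
 * and the region is shortened to end at 0x23fffdfff, so the stolen pages
 * are never handed to the VM page allocator.
 */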

static void
init_mem_locality(void)
{
        int i;

        /*
         * For now, assume -1 == "no locality information for
         * this pairing".
         */
        for (i = 0; i < MAXMEMDOM * MAXMEMDOM; i++)
                vm_locality_table[i] = -1;
}

static void
parse_acpi_tables(void *dummy)
{

        if (parse_srat() < 0)
                return;
        init_mem_locality();
        (void) parse_slit();
}
SYSINIT(parse_acpi_tables, SI_SUB_VM - 1, SI_ORDER_FIRST, parse_acpi_tables,
    NULL);

static void
srat_walk_table(acpi_subtable_handler *handler, void *arg)
{

        acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
            handler, arg);
}

/*
 * Set up per-CPU domain IDs.
 */
static void
srat_set_cpus(void *dummy)
{
        struct cpu_info *cpu;
        struct pcpu *pc;
        u_int i;

        if (srat_physaddr == 0)
                return;
        for (i = 0; i < MAXCPU; i++) {
                if (CPU_ABSENT(i))
                        continue;
                pc = pcpu_find(i);
                KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
                cpu = &cpus[pc->pc_apic_id];
                if (!cpu->enabled)
                        panic("SRAT: CPU with APIC ID %u is not known",
                            pc->pc_apic_id);
#ifdef NUMA
                pc->pc_domain = cpu->domain;
#else
                pc->pc_domain = 0;
#endif
                CPU_SET(i, &cpuset_domain[pc->pc_domain]);
                if (bootverbose)
                        printf("SRAT: CPU %u has memory domain %d\n", i,
                            pc->pc_domain);
        }

        /* Last use of the cpus[] array; unmap it. */
        pmap_unmapbios((vm_offset_t)cpus, sizeof(*cpus) * (max_apic_id + 1));
        cpus = NULL;
}
SYSINIT(srat_set_cpus, SI_SUB_CPU, SI_ORDER_ANY, srat_set_cpus, NULL);

/*
 * Map a _PXM value to a VM domain ID.
 *
 * Returns the domain ID, or -1 if no domain ID was found.
 */
int
acpi_map_pxm_to_vm_domainid(int pxm)
{
        int i;

        for (i = 0; i < ndomain; i++) {
                if (domain_pxm[i] == pxm)
                        return (i);
        }

        return (-1);
}

#else /* MAXMEMDOM == 1 */

int
acpi_map_pxm_to_vm_domainid(int pxm)
{

        return (-1);
}

#endif /* MAXMEMDOM > 1 */
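
/*
 * Usage sketch (hypothetical caller, not part of this file): ACPI bus
 * code that has evaluated a device's _PXM object can translate the
 * proximity domain into a VM domain with something like
 *
 *      int domain;
 *
 *      domain = acpi_map_pxm_to_vm_domainid(pxm);
 *      if (domain < 0)
 *              domain = 0;     (fall back when no mapping exists)
 *
 * On kernels built with MAXMEMDOM == 1 the stub above always returns -1,
 * so callers must tolerate the "no domain" case.
 */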