xref: /freebsd/sys/x86/acpica/srat.c (revision d06955f9bdb1416d9196043ed781f9b36dae9adc)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Hudson River Trading LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/aclocal.h>
#include <contrib/dev/acpica/include/actables.h>

#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <x86/apicvar.h>

#include <dev/acpica/acpivar.h>

#if MAXMEMDOM > 1
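/*
 * Per-CPU affinity state parsed from the SRAT, indexed by local APIC ID.
 */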
static struct cpu_info {
	int enabled:1;
	int has_memory:1;
	int domain;
} *cpus;

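/*
 * Memory affinity entries parsed from the SRAT, kept sorted by start
 * address.  vm_phys consumes this table when VM_NUMA_ALLOC is configured.
 */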
struct mem_affinity mem_info[VM_PHYSSEG_MAX + 1];
int num_mem;

static ACPI_TABLE_SRAT *srat;
static vm_paddr_t srat_physaddr;

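/*
 * domain_pxm[i] holds the ACPI proximity domain (_PXM) that was renumbered
 * to VM domain ID 'i' by renumber_domains().
 */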
static int domain_pxm[MAXMEMDOM];
static int ndomain;

static ACPI_TABLE_SLIT *slit;
static vm_paddr_t slit_physaddr;
static int vm_locality_table[MAXMEMDOM * MAXMEMDOM];

static void	srat_walk_table(acpi_subtable_handler *handler, void *arg);

/*
 * SLIT parsing.
 */

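/*
 * Parse the SLIT into vm_locality_table[], a packed matrix of VM-domain
 * locality values.
 */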
static void
slit_parse_table(ACPI_TABLE_SLIT *s)
{
	int i, j;
	int i_domain, j_domain;
	int offset = 0;
	uint8_t e;

	/*
	 * This maps the SLIT data into the VM-domain centric view.
	 * The PXM namespace may be sparse, so each PXM is remapped to
	 * its VM-domain ID, and any PXM without a corresponding VM
	 * domain is skipped.
	 *
	 * The result is a packed 2d array of VM-domain locality
	 * information entries.
	 */

	if (bootverbose)
		printf("SLIT.Localities: %d\n", (int) s->LocalityCount);
	for (i = 0; i < s->LocalityCount; i++) {
		i_domain = acpi_map_pxm_to_vm_domainid(i);
		if (i_domain < 0)
			continue;

		if (bootverbose)
			printf("%d: ", i);
		for (j = 0; j < s->LocalityCount; j++) {
			j_domain = acpi_map_pxm_to_vm_domainid(j);
			if (j_domain < 0)
				continue;
			e = s->Entry[i * s->LocalityCount + j];
			if (bootverbose)
				printf("%d ", (int) e);
			/* 255 == "no locality information" */
			if (e == 255)
				vm_locality_table[offset] = -1;
			else
				vm_locality_table[offset] = e;
			offset++;
		}
		if (bootverbose)
			printf("\n");
	}
}

/*
 * Look for an ACPI System Locality Distance Information Table ("SLIT")
 */
static int
parse_slit(void)
{

	if (resource_disabled("slit", 0)) {
		return (-1);
	}

	slit_physaddr = acpi_find_table(ACPI_SIG_SLIT);
	if (slit_physaddr == 0) {
		return (-1);
	}

	/*
	 * Make a pass over the table to populate the
	 * vm_locality_table[].
	 */
	slit = acpi_map_table(slit_physaddr, ACPI_SIG_SLIT);
	slit_parse_table(slit);
	acpi_unmap_table(slit);
	slit = NULL;

#ifdef VM_NUMA_ALLOC
	/* Tell the VM about it! */
	mem_locality = vm_locality_table;
#endif
	return (0);
}

/*
 * SRAT parsing.
 */

/*
 * Returns true if a memory range overlaps with at least one range in
 * phys_avail[].
 */
static int
overlaps_phys_avail(vm_paddr_t start, vm_paddr_t end)
{
	int i;

	for (i = 0; phys_avail[i] != 0 && phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i + 1] <= start)
			continue;
		if (phys_avail[i] < end)
			return (1);
		break;
	}
	return (0);
}

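/*
 * Parse a single SRAT subtable, recording CPU-to-domain assignments in
 * cpus[] and memory-range-to-domain assignments in mem_info[].  On a
 * malformed table, the int pointed to by 'arg' is set to ENXIO.
 */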
static void
srat_parse_entry(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_SRAT_CPU_AFFINITY *cpu;
	ACPI_SRAT_X2APIC_CPU_AFFINITY *x2apic;
	ACPI_SRAT_MEM_AFFINITY *mem;
	int domain, i, slot;

	switch (entry->Type) {
	case ACPI_SRAT_TYPE_CPU_AFFINITY:
		cpu = (ACPI_SRAT_CPU_AFFINITY *)entry;
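		/*
		 * The proximity domain is split across the ProximityDomainLo
		 * byte and the three ProximityDomainHi bytes; assemble the
		 * full 32-bit value.
		 */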
		domain = cpu->ProximityDomainLo |
		    cpu->ProximityDomainHi[0] << 8 |
		    cpu->ProximityDomainHi[1] << 16 |
		    cpu->ProximityDomainHi[2] << 24;
		if (bootverbose)
			printf("SRAT: Found CPU APIC ID %u domain %d: %s\n",
			    cpu->ApicId, domain,
			    (cpu->Flags & ACPI_SRAT_CPU_ENABLED) ?
			    "enabled" : "disabled");
		if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED))
			break;
		if (cpu->ApicId > max_apic_id) {
			printf("SRAT: Ignoring local APIC ID %u (too high)\n",
			    cpu->ApicId);
			break;
		}

		if (cpus[cpu->ApicId].enabled) {
			printf("SRAT: Duplicate local APIC ID %u\n",
			    cpu->ApicId);
			*(int *)arg = ENXIO;
			break;
		}
		cpus[cpu->ApicId].domain = domain;
		cpus[cpu->ApicId].enabled = 1;
		break;
	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
		x2apic = (ACPI_SRAT_X2APIC_CPU_AFFINITY *)entry;
		if (bootverbose)
			printf("SRAT: Found CPU APIC ID %u domain %d: %s\n",
			    x2apic->ApicId, x2apic->ProximityDomain,
			    (x2apic->Flags & ACPI_SRAT_CPU_ENABLED) ?
			    "enabled" : "disabled");
		if (!(x2apic->Flags & ACPI_SRAT_CPU_ENABLED))
			break;
		if (x2apic->ApicId > max_apic_id) {
			printf("SRAT: Ignoring local APIC ID %u (too high)\n",
			    x2apic->ApicId);
			break;
		}

		KASSERT(!cpus[x2apic->ApicId].enabled,
		    ("Duplicate local APIC ID %u", x2apic->ApicId));
		cpus[x2apic->ApicId].domain = x2apic->ProximityDomain;
		cpus[x2apic->ApicId].enabled = 1;
		break;
	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
		mem = (ACPI_SRAT_MEM_AFFINITY *)entry;
		if (bootverbose)
			printf(
		    "SRAT: Found memory domain %d addr 0x%jx len 0x%jx: %s\n",
			    mem->ProximityDomain, (uintmax_t)mem->BaseAddress,
			    (uintmax_t)mem->Length,
			    (mem->Flags & ACPI_SRAT_MEM_ENABLED) ?
			    "enabled" : "disabled");
		if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED))
			break;
		if (!overlaps_phys_avail(mem->BaseAddress,
		    mem->BaseAddress + mem->Length)) {
			printf("SRAT: Ignoring memory at addr 0x%jx\n",
			    (uintmax_t)mem->BaseAddress);
			break;
		}
		if (num_mem == VM_PHYSSEG_MAX) {
			printf("SRAT: Too many memory regions\n");
			*(int *)arg = ENXIO;
			break;
		}
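		/*
		 * Insert the new range into mem_info[], keeping the array
		 * sorted by start address and rejecting overlapping entries.
		 */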
		slot = num_mem;
		for (i = 0; i < num_mem; i++) {
			if (mem_info[i].end <= mem->BaseAddress)
				continue;
			if (mem_info[i].start <
			    (mem->BaseAddress + mem->Length)) {
				printf("SRAT: Overlapping memory entries\n");
				*(int *)arg = ENXIO;
				return;
			}
			slot = i;
		}
		for (i = num_mem; i > slot; i--)
			mem_info[i] = mem_info[i - 1];
		mem_info[slot].start = mem->BaseAddress;
		mem_info[slot].end = mem->BaseAddress + mem->Length;
		mem_info[slot].domain = mem->ProximityDomain;
		num_mem++;
		break;
	}
}

/*
 * Ensure each memory domain has at least one CPU and that each CPU
 * has at least one memory domain.
 */
static int
check_domains(void)
{
	int found, i, j;

	for (i = 0; i < num_mem; i++) {
		found = 0;
		for (j = 0; j <= max_apic_id; j++)
			if (cpus[j].enabled &&
			    cpus[j].domain == mem_info[i].domain) {
				cpus[j].has_memory = 1;
				found++;
			}
		if (!found) {
			printf("SRAT: No CPU found for memory domain %d\n",
			    mem_info[i].domain);
			return (ENXIO);
		}
	}
	for (i = 0; i <= max_apic_id; i++)
		if (cpus[i].enabled && !cpus[i].has_memory) {
			printf("SRAT: No memory found for CPU %d\n", i);
			return (ENXIO);
		}
	return (0);
}

/*
 * Check that the SRAT memory regions cover all of the regions in
 * phys_avail[].
 */
static int
check_phys_avail(void)
{
	vm_paddr_t address;
	int i, j;

	/* j is the current offset into phys_avail[]. */
	address = phys_avail[0];
	j = 0;
	for (i = 0; i < num_mem; i++) {
		/*
		 * Consume as many phys_avail[] entries as fit in this
		 * region.
		 */
		while (address >= mem_info[i].start &&
		    address <= mem_info[i].end) {
			/*
			 * If we cover the rest of this phys_avail[] entry,
			 * advance to the next entry.
			 */
			if (phys_avail[j + 1] <= mem_info[i].end) {
				j += 2;
				if (phys_avail[j] == 0 &&
				    phys_avail[j + 1] == 0) {
					return (0);
				}
				address = phys_avail[j];
			} else
				address = mem_info[i].end + 1;
		}
	}
	printf("SRAT: No memory region found for 0x%jx - 0x%jx\n",
	    (uintmax_t)phys_avail[j], (uintmax_t)phys_avail[j + 1]);
	return (ENXIO);
}

/*
 * Renumber the memory domains to be compact and zero-based if not
 * already.  Returns an error if there are too many domains.
 */
static int
renumber_domains(void)
{
	int i, j, slot;

	/* Enumerate all the domains. */
	ndomain = 0;
	for (i = 0; i < num_mem; i++) {
		/* See if this domain is already known. */
		for (j = 0; j < ndomain; j++) {
			if (domain_pxm[j] >= mem_info[i].domain)
				break;
		}
		if (j < ndomain && domain_pxm[j] == mem_info[i].domain)
			continue;

		if (ndomain >= MAXMEMDOM) {
			ndomain = 1;
			printf("SRAT: Too many memory domains\n");
			return (EFBIG);
		}

		/* Insert the new domain at slot 'j'. */
		slot = j;
		for (j = ndomain; j > slot; j--)
			domain_pxm[j] = domain_pxm[j - 1];
		domain_pxm[slot] = mem_info[i].domain;
		ndomain++;
	}

	/* Renumber each domain to its index in the sorted 'domain_pxm' list. */
	for (i = 0; i < ndomain; i++) {
		/*
		 * If the domain is already the right value, no need
		 * to renumber.
		 */
		if (domain_pxm[i] == i)
			continue;

		/* Walk the cpus[] and mem_info[] arrays to renumber. */
		for (j = 0; j < num_mem; j++)
			if (mem_info[j].domain == domain_pxm[i])
				mem_info[j].domain = i;
		for (j = 0; j <= max_apic_id; j++)
			if (cpus[j].enabled && cpus[j].domain == domain_pxm[i])
				cpus[j].domain = i;
	}

	return (0);
}

/*
 * Look for an ACPI System Resource Affinity Table ("SRAT")
 */
static int
parse_srat(void)
{
	unsigned int idx, size;
	vm_paddr_t addr;
	int error;

	if (resource_disabled("srat", 0))
		return (-1);

	srat_physaddr = acpi_find_table(ACPI_SIG_SRAT);
	if (srat_physaddr == 0)
		return (-1);

	/*
	 * Allocate data structure:
	 *
	 * Find the last physical memory region and steal some memory from
	 * it. This is done because at this point in the boot process
	 * malloc is still not usable.
	 */
	for (idx = 0; phys_avail[idx + 1] != 0; idx += 2);
	KASSERT(idx != 0, ("phys_avail is empty!"));
	idx -= 2;

	size = sizeof(*cpus) * (max_apic_id + 1);
	addr = trunc_page(phys_avail[idx + 1] - size);
	KASSERT(addr >= phys_avail[idx],
	    ("Not enough memory for SRAT table items"));
	phys_avail[idx + 1] = addr - 1;

	/*
	 * We cannot rely on PHYS_TO_DMAP because this code is also used on
	 * i386, so use pmap_mapbios to map the memory; this ends up using
	 * the default memory attribute (WB) and the DMAP when available.
	 */
	cpus = (struct cpu_info *)pmap_mapbios(addr, size);
	bzero(cpus, size);

	/*
	 * Make a pass over the table to populate the cpus[] and
	 * mem_info[] tables.
	 */
	srat = acpi_map_table(srat_physaddr, ACPI_SIG_SRAT);
	error = 0;
	srat_walk_table(srat_parse_entry, &error);
	acpi_unmap_table(srat);
	srat = NULL;
	if (error || check_domains() != 0 || check_phys_avail() != 0 ||
	    renumber_domains() != 0) {
		srat_physaddr = 0;
		return (-1);
	}

#ifdef VM_NUMA_ALLOC
	/* Point vm_phys at our memory affinity table. */
	vm_ndomains = ndomain;
	mem_affinity = mem_info;
#endif

	return (0);
}

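/*
 * Initialize the locality matrix to "unknown" before the SLIT is parsed.
 */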
static void
init_mem_locality(void)
{
	int i;

	/*
	 * For now, assume -1 == "no locality information for
	 * this pairing".
	 */
	for (i = 0; i < MAXMEMDOM * MAXMEMDOM; i++)
		vm_locality_table[i] = -1;
}

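/*
 * Parse the SRAT before the SLIT: the SLIT can only be interpreted once
 * the PXM-to-VM-domain mapping derived from the SRAT is in place.
 */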
static void
parse_acpi_tables(void *dummy)
{

	if (parse_srat() < 0)
		return;
	init_mem_locality();
	(void) parse_slit();
}
SYSINIT(parse_acpi_tables, SI_SUB_VM - 1, SI_ORDER_FIRST, parse_acpi_tables,
    NULL);

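/*
 * Walk all subtables of the mapped SRAT, invoking 'handler' on each one.
 */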
static void
srat_walk_table(acpi_subtable_handler *handler, void *arg)
{

	acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
	    handler, arg);
}

/*
 * Set up per-CPU domain IDs.
 */
static void
srat_set_cpus(void *dummy)
{
	struct cpu_info *cpu;
	struct pcpu *pc;
	u_int i;

	if (srat_physaddr == 0)
		return;
	for (i = 0; i < MAXCPU; i++) {
		if (CPU_ABSENT(i))
			continue;
		pc = pcpu_find(i);
		KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
		cpu = &cpus[pc->pc_apic_id];
		if (!cpu->enabled)
			panic("SRAT: CPU with APIC ID %u is not known",
			    pc->pc_apic_id);
		pc->pc_domain = cpu->domain;
		CPU_SET(i, &cpuset_domain[cpu->domain]);
		if (bootverbose)
			printf("SRAT: CPU %u has memory domain %d\n", i,
			    cpu->domain);
	}

	/* This is the last use of the cpus array; unmap it. */
	pmap_unmapbios((vm_offset_t)cpus, sizeof(*cpus) * (max_apic_id + 1));
	cpus = NULL;
}
SYSINIT(srat_set_cpus, SI_SUB_CPU, SI_ORDER_ANY, srat_set_cpus, NULL);

/*
 * Map a _PXM value to a VM domain ID.
 *
 * Returns the domain ID, or -1 if no domain ID was found.
 */
int
acpi_map_pxm_to_vm_domainid(int pxm)
{
	int i;

	for (i = 0; i < ndomain; i++) {
		if (domain_pxm[i] == pxm)
			return (i);
	}

	return (-1);
}

#else /* MAXMEMDOM == 1 */

int
acpi_map_pxm_to_vm_domainid(int pxm)
{

	return (-1);
}

#endif /* MAXMEMDOM > 1 */