1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25 /*
26 * Copyright (c) 2010, Intel Corporation.
27 * All rights reserved.
28 */
29
30 /*
31 * LOCALITY GROUP (LGROUP) PLATFORM SUPPORT FOR X86/AMD64 PLATFORMS
32 * ================================================================
33 * Multiprocessor AMD and Intel systems may have Non Uniform Memory Access
34 * (NUMA). A NUMA machine consists of one or more "nodes" that each consist of
35 * one or more CPUs and some local memory. The CPUs in each node can access
36 * the memory in the other nodes but at a higher latency than accessing their
37 * local memory. Typically, a system with only one node has Uniform Memory
38 * Access (UMA), but it is possible for a one node system to have some global
39 * memory outside of the node which is accessed at higher latency.
40 *
41 * Module Description
42 * ------------------
43 * This module provides a platform interface for determining which CPUs and
44 * which memory (and how much) are in a NUMA node and how far each node is from
45 * each other. The interface is used by the Virtual Memory (VM) system and the
46 * common lgroup framework. The VM system uses the plat_*() routines to fill
47 * in its memory node (memnode) array with the physical address range spanned
48 * by each NUMA node to know which memory belongs to which node, so it can
49 * build and manage a physical page free list for each NUMA node and allocate
50 * local memory from each node as needed. The common lgroup framework uses the
51 * exported lgrp_plat_*() routines to figure out which CPUs and memory belong
52 * to each node (leaf lgroup) and how far each node is from each other, so it
53 * can build the latency (lgroup) topology for the machine in order to optimize
54 * for locality. Also, lgroup platform handles are used in the interface with
55 * this module instead of lgroups, so this module shouldn't need to know
56 * anything about lgroups. Instead, it just needs to know which CPUs, memory,
57 * etc. are in each NUMA node, how far each node is from each other, and to use
58 * a unique lgroup platform handle to refer to each node through the interface.
59 *
60 * Determining NUMA Configuration
61 * ------------------------------
62 * By default, this module will try to determine the NUMA configuration of the
63 * machine by reading the ACPI System Resource Affinity Table (SRAT) and System
64 * Locality Information Table (SLIT). The SRAT contains info to tell which
65 * CPUs and memory are local to a given proximity domain (NUMA node). The SLIT
66 * is a matrix that gives the distance between each system locality (which is
67 * a NUMA node and should correspond to proximity domains in the SRAT). For
68 * more details on the SRAT and SLIT, please refer to an ACPI 3.0 or newer
69 * specification.
70 *
71 * If the SRAT doesn't exist on a system with AMD Opteron processors, we
72 * examine registers in PCI configuration space to determine how many nodes are
73 * in the system and which CPUs and memory are in each node. This is done
74 * while booting the kernel.
75 *
76 * NOTE: Using these PCI configuration space registers to determine this
77 * locality info is not guaranteed to work or be compatible across all
78 * Opteron processor families.
79 *
80 * If the SLIT does not exist or look right, the kernel will probe to determine
81 * the distance between nodes as long as the NUMA CPU and memory configuration
82 * has been determined (see lgrp_plat_probe() for details).
83 *
84 * Data Structures
85 * ---------------
86 * The main data structures used by this code are the following:
87 *
88 * - lgrp_plat_cpu_node[] CPU to node ID mapping table indexed by
89 * CPU ID (only used for SRAT)
90 *
91 * - lgrp_plat_lat_stats.latencies[][] Table of latencies between same and
92 * different nodes indexed by node ID
93 *
94 * - lgrp_plat_node_cnt Number of NUMA nodes in system for
95 * non-DR-capable systems,
96 * maximum possible number of NUMA nodes
97 * in system for DR capable systems.
98 *
99 * - lgrp_plat_node_domain[] Node ID to proximity domain ID mapping
100 * table indexed by node ID (only used
101 * for SRAT)
102 *
103 * - lgrp_plat_memnode_info[] Table with physical address range for
104 * each memory node indexed by memory node
105 * ID
106 *
107 * The code is implemented to make the following always be true:
108 *
109 * lgroup platform handle == node ID == memnode ID
110 *
111 * Moreover, it allows for the proximity domain ID to be equal to all of the
112 * above as long as the proximity domain IDs are numbered from 0 to <number of
113 * nodes - 1>. This is done by hashing each proximity domain ID into the range
114 * from 0 to <number of nodes - 1>. Then proximity domain ID N will hash into
115 * node ID N, be entered into lgrp_plat_node_domain[N], and be assigned node
116 * ID N. If the proximity domain IDs aren't numbered
117 * from 0 to <number of nodes - 1>, then hashing the proximity domain IDs into
118 * lgrp_plat_node_domain[] will still work for assigning proximity domain IDs
119 * to node IDs. However, the proximity domain IDs may not map to the
120 * equivalent node ID since we want to keep the node IDs numbered from 0 to
121 * <number of nodes - 1> to minimize cost of searching and potentially space.
122 *
123 * With the introduction of support of memory DR operations on x86 platforms,
124 * things get a little complicated. The addresses of hot-added memory may not
125 * be contiguous with other memory connected to the same lgrp node. In other
126 * words, memory addresses may get interleaved among lgrp nodes after memory
127 * DR operations. To work around this limitation, we have extended the
128 * relationship between lgrp node and memory node from a 1:1 map to a 1:N map,
129 * which means there may be multiple memory nodes associated with an lgrp node
130 * after memory DR operations.
131 *
132 * To minimize the code changes to support memory DR operations, the
133 * following policies have been adopted.
134 * 1) On non-DR-capable systems, the relationship among lgroup platform handle,
135 * node ID and memnode ID is still kept as:
136 * lgroup platform handle == node ID == memnode ID
137 * 2) For memory present at boot time on DR capable platforms, the relationship
138 * is still kept as is.
139 * lgroup platform handle == node ID == memnode ID
140 * 3) For hot-added memory, the relationship between lgrp ID and memnode ID has
141 * been changed from 1:1 map to 1:N map. Memnode IDs [0 - lgrp_plat_node_cnt)
142 * are reserved for memory present at boot time, and memnode IDs
143 * [lgrp_plat_node_cnt, max_mem_nodes) are used to dynamically allocate
144 * memnode ID for hot-added memory.
145 * 4) All boot code having the assumption "node ID == memnode ID" can live as
146 * is, because node ID is always equal to memnode ID at boot time.
147 * 5) The lgrp_plat_memnode_info_update(), plat_pfn_to_mem_node() and
148 * lgrp_plat_mem_size() related logic has been enhanced to deal with
149 * the 1:N map relationship.
150 * 6) The latency probing related logic, which has the assumption
151 * "node ID == memnode ID" and may be called at run time, is disabled if
152 * memory DR operation is enabled.
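 *
 * Worked example (illustrative, not from any particular machine): on a DR
 * capable system with 4 NUMA nodes, memnode IDs 0-3 are reserved for memory
 * present at boot (one per lgrp node), while hot-added ranges get memnode
 * IDs allocated from 4 up to max_mem_nodes - 1, each tagged via its lgrphand
 * field with the lgrp node it belongs to (see lgrp_plat_mem_size() for how
 * such memnodes are summed back into their lgroup).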
153 */
154
155
156 #include <sys/archsystm.h> /* for {in,out}{b,w,l}() */
157 #include <sys/atomic.h>
158 #include <sys/bootconf.h>
159 #include <sys/cmn_err.h>
160 #include <sys/controlregs.h>
161 #include <sys/cpupart.h>
162 #include <sys/cpuvar.h>
163 #include <sys/lgrp.h>
164 #include <sys/machsystm.h>
165 #include <sys/memlist.h>
166 #include <sys/memnode.h>
167 #include <sys/mman.h>
168 #include <sys/note.h>
169 #include <sys/pci_cfgspace.h>
170 #include <sys/pci_impl.h>
171 #include <sys/param.h>
172 #include <sys/pghw.h>
173 #include <sys/promif.h> /* for prom_printf() */
174 #include <sys/sysmacros.h>
175 #include <sys/systm.h>
176 #include <sys/thread.h>
177 #include <sys/types.h>
178 #include <sys/var.h>
179 #include <sys/x86_archext.h>
180 #include <vm/hat_i86.h>
181 #include <vm/seg_kmem.h>
182 #include <vm/vm_dep.h>
183
184 #include <sys/acpidev.h>
185 #include <sys/acpi/acpi.h> /* for SRAT, SLIT and MSCT */
186
187 /* from fakebop.c */
188 extern ACPI_TABLE_SRAT *srat_ptr;
189 extern ACPI_TABLE_SLIT *slit_ptr;
190 extern ACPI_TABLE_MSCT *msct_ptr;
191
192 #define MAX_NODES 8
193 #define NLGRP (MAX_NODES * (MAX_NODES - 1) + 1)
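
/*
 * Worked example: with MAX_NODES of 8, NLGRP evaluates to 8 * (8 - 1) + 1,
 * i.e. 57 lgroups; this is the same n * (n - 1) + 1 bound that
 * lgrp_plat_max_lgrps() computes from lgrp_plat_node_cnt below.
 */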
194
195 /*
196 * Constants for configuring probing
197 */
198 #define LGRP_PLAT_PROBE_NROUNDS 64 /* default laps for probing */
199 #define LGRP_PLAT_PROBE_NSAMPLES 1 /* default samples to take */
200 #define LGRP_PLAT_PROBE_NREADS 256 /* number of vendor ID reads */
201
202 /*
203 * Flags for probing
204 */
205 #define LGRP_PLAT_PROBE_ENABLE 0x1 /* enable probing */
206 #define LGRP_PLAT_PROBE_PGCPY 0x2 /* probe using page copy */
207 #define LGRP_PLAT_PROBE_VENDOR 0x4 /* probe vendor ID register */
208
209 /*
210 * Hash proximity domain ID into node to domain mapping table "mod" number of
211 * nodes to minimize span of entries used and try to have lowest numbered
212 * proximity domain be node 0
213 */
214 #define NODE_DOMAIN_HASH(domain, node_cnt) \
215 ((lgrp_plat_prox_domain_min == UINT32_MAX) ? (domain) % node_cnt : \
216 ((domain) - lgrp_plat_prox_domain_min) % node_cnt)
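
/*
 * Worked example (illustrative values, not from any particular machine):
 * if the SRAT enumerates proximity domains 4, 5, 6 and 7 on a four node
 * system, lgrp_plat_prox_domain_min ends up as 4 and NODE_DOMAIN_HASH()
 * computes (domain - 4) % 4, so domains 4/5/6/7 hash to nodes 0/1/2/3 and
 * node IDs stay densely numbered from 0. When the minimum isn't known
 * (lgrp_plat_prox_domain_min == UINT32_MAX), the plain (domain) % node_cnt
 * hash is used and any collisions are resolved by the linear search in
 * lgrp_plat_domain_to_node().
 */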
217
218 /*
219 * CPU to node ID mapping structure (only used with SRAT)
220 */
221 typedef struct cpu_node_map {
222 int exists;
223 uint_t node;
224 uint32_t apicid;
225 uint32_t prox_domain;
226 } cpu_node_map_t;
227
228 /*
229 * Latency statistics
230 */
231 typedef struct lgrp_plat_latency_stats {
232 hrtime_t latencies[MAX_NODES][MAX_NODES];
233 hrtime_t latency_max;
234 hrtime_t latency_min;
235 } lgrp_plat_latency_stats_t;
236
237 /*
238 * Memory configuration for probing
239 */
240 typedef struct lgrp_plat_probe_mem_config {
241 size_t probe_memsize; /* how much memory to probe per node */
242 caddr_t probe_va[MAX_NODES]; /* where memory mapped for probing */
243 pfn_t probe_pfn[MAX_NODES]; /* physical pages to map for probing */
244 } lgrp_plat_probe_mem_config_t;
245
246 /*
247 * Statistics kept for probing
248 */
249 typedef struct lgrp_plat_probe_stats {
250 hrtime_t flush_cost;
251 hrtime_t probe_cost;
252 hrtime_t probe_cost_total;
253 hrtime_t probe_error_code;
254 hrtime_t probe_errors[MAX_NODES][MAX_NODES];
255 int probe_suspect[MAX_NODES][MAX_NODES];
256 hrtime_t probe_max[MAX_NODES][MAX_NODES];
257 hrtime_t probe_min[MAX_NODES][MAX_NODES];
258 } lgrp_plat_probe_stats_t;
259
260 /*
261 * Node to proximity domain ID mapping structure (only used with SRAT)
262 */
263 typedef struct node_domain_map {
264 int exists;
265 uint32_t prox_domain;
266 } node_domain_map_t;
267
268 /*
269 * Node ID and starting and ending page for physical memory in memory node
270 */
271 typedef struct memnode_phys_addr_map {
272 pfn_t start;
273 pfn_t end;
274 int exists;
275 uint32_t prox_domain;
276 uint32_t device_id;
277 uint_t lgrphand;
278 } memnode_phys_addr_map_t;
279
280 /*
281 * Number of CPUs for which we got APIC IDs
282 */
283 static int lgrp_plat_apic_ncpus = 0;
284
285 /*
286 * CPU to node ID mapping table (only used for SRAT) and its max number of
287 * entries
288 */
289 static cpu_node_map_t *lgrp_plat_cpu_node = NULL;
290 static uint_t lgrp_plat_cpu_node_nentries = 0;
291
292 /*
293 * Latency statistics
294 */
295 lgrp_plat_latency_stats_t lgrp_plat_lat_stats;
296
297 /*
298 * Whether memory is interleaved across nodes causing MPO to be disabled
299 */
300 static int lgrp_plat_mem_intrlv = 0;
301
302 /*
303 * Node ID to proximity domain ID mapping table (only used for SRAT)
304 */
305 static node_domain_map_t lgrp_plat_node_domain[MAX_NODES];
306
307 /*
308 * Physical address range for memory in each node
309 */
310 static memnode_phys_addr_map_t lgrp_plat_memnode_info[MAX_MEM_NODES];
311
312 /*
313 * Statistics gotten from probing
314 */
315 static lgrp_plat_probe_stats_t lgrp_plat_probe_stats;
316
317 /*
318 * Memory configuration for probing
319 */
320 static lgrp_plat_probe_mem_config_t lgrp_plat_probe_mem_config;
321
322 /*
323 * Lowest proximity domain ID seen in ACPI SRAT
324 */
325 static uint32_t lgrp_plat_prox_domain_min = UINT32_MAX;
326
327 /*
328 * Error code from processing ACPI SRAT
329 */
330 static int lgrp_plat_srat_error = 0;
331
332 /*
333 * Error code from processing ACPI SLIT
334 */
335 static int lgrp_plat_slit_error = 0;
336
337 /*
338 * Whether lgrp topology has been flattened to 2 levels.
339 */
340 static int lgrp_plat_topo_flatten = 0;
341
342
343 /*
344 * Maximum memory node ID in use.
345 */
346 static uint_t lgrp_plat_max_mem_node;
347
348 /*
349 * Allocate lgroup array statically
350 */
351 static lgrp_t lgrp_space[NLGRP];
352 static int nlgrps_alloc;
353
354
355 /*
356 * Enable finding and using minimum proximity domain ID when hashing
357 */
358 int lgrp_plat_domain_min_enable = 1;
359
360 /*
361 * Maximum possible number of nodes in system
362 */
363 uint_t lgrp_plat_node_cnt = 1;
364
365 /*
366 * Enable sorting nodes in ascending order by starting physical address
367 */
368 int lgrp_plat_node_sort_enable = 1;
369
370 /*
371 * Configuration Parameters for Probing
372 * - lgrp_plat_probe_flags Flags to specify enabling probing, probe
373 * operation, etc.
374 * - lgrp_plat_probe_nrounds How many rounds of probing to do
375 * - lgrp_plat_probe_nsamples Number of samples to take when probing each
376 * node
377 * - lgrp_plat_probe_nreads Number of times to read vendor ID from
378 * Northbridge for each probe
379 */
380 uint_t lgrp_plat_probe_flags = 0;
381 int lgrp_plat_probe_nrounds = LGRP_PLAT_PROBE_NROUNDS;
382 int lgrp_plat_probe_nsamples = LGRP_PLAT_PROBE_NSAMPLES;
383 int lgrp_plat_probe_nreads = LGRP_PLAT_PROBE_NREADS;
384
385 /*
386 * Enable use of ACPI System Resource Affinity Table (SRAT), System
387 * Locality Information Table (SLIT) and Maximum System Capability Table (MSCT)
388 */
389 int lgrp_plat_srat_enable = 1;
390 int lgrp_plat_slit_enable = 1;
391 int lgrp_plat_msct_enable = 1;
392
393 /*
394 * mnode_xwa: set to non-zero value to initiate workaround if large pages are
395 * found to be crossing memory node boundaries. The workaround will eliminate
396 * a base size page at the end of each memory node boundary to ensure that
397 * a large page with constituent pages that span more than 1 memory node
398 * can never be formed.
399 *
400 */
401 int mnode_xwa = 1;
402
403 /*
404 * Static array to hold lgroup statistics
405 */
406 struct lgrp_stats lgrp_stats[NLGRP];
407
408
409 /*
410 * Forward declarations of platform interface routines
411 */
412 void plat_build_mem_nodes(struct memlist *list);
413
414 int plat_mnode_xcheck(pfn_t pfncnt);
415
416 lgrp_handle_t plat_mem_node_to_lgrphand(int mnode);
417
418 int plat_pfn_to_mem_node(pfn_t pfn);
419
420 /*
421 * Forward declarations of lgroup platform interface routines
422 */
423 lgrp_t *lgrp_plat_alloc(lgrp_id_t lgrpid);
424
425 void lgrp_plat_config(lgrp_config_flag_t flag, uintptr_t arg);
426
427 lgrp_handle_t lgrp_plat_cpu_to_hand(processorid_t id);
428
429 void lgrp_plat_init(lgrp_init_stages_t stage);
430
431 int lgrp_plat_latency(lgrp_handle_t from, lgrp_handle_t to);
432
433 int lgrp_plat_max_lgrps(void);
434
435 pgcnt_t lgrp_plat_mem_size(lgrp_handle_t plathand,
436 lgrp_mem_query_t query);
437
438 lgrp_handle_t lgrp_plat_pfn_to_hand(pfn_t pfn);
439
440 void lgrp_plat_probe(void);
441
442 lgrp_handle_t lgrp_plat_root_hand(void);
443
444
445 /*
446 * Forward declarations of local routines
447 */
448 static int is_opteron(void);
449
450 static int lgrp_plat_cpu_node_update(node_domain_map_t *node_domain,
451 int node_cnt, cpu_node_map_t *cpu_node, int nentries, uint32_t apicid,
452 uint32_t domain);
453
454 static int lgrp_plat_cpu_to_node(cpu_t *cp, cpu_node_map_t *cpu_node,
455 int cpu_node_nentries);
456
457 static int lgrp_plat_domain_to_node(node_domain_map_t *node_domain,
458 int node_cnt, uint32_t domain);
459
460 static void lgrp_plat_get_numa_config(void);
461
462 static void lgrp_plat_latency_adjust(memnode_phys_addr_map_t *memnode_info,
463 lgrp_plat_latency_stats_t *lat_stats,
464 lgrp_plat_probe_stats_t *probe_stats);
465
466 static int lgrp_plat_latency_verify(memnode_phys_addr_map_t *memnode_info,
467 lgrp_plat_latency_stats_t *lat_stats);
468
469 static void lgrp_plat_main_init(void);
470
471 static pgcnt_t lgrp_plat_mem_size_default(lgrp_handle_t, lgrp_mem_query_t);
472
473 static int lgrp_plat_node_domain_update(node_domain_map_t *node_domain,
474 int node_cnt, uint32_t domain);
475
476 static int lgrp_plat_memnode_info_update(node_domain_map_t *node_domain,
477 int node_cnt, memnode_phys_addr_map_t *memnode_info, int memnode_cnt,
478 uint64_t start, uint64_t end, uint32_t domain, uint32_t device_id);
479
480 static void lgrp_plat_node_sort(node_domain_map_t *node_domain,
481 int node_cnt, cpu_node_map_t *cpu_node, int cpu_count,
482 memnode_phys_addr_map_t *memnode_info);
483
484 static hrtime_t lgrp_plat_probe_time(int to, cpu_node_map_t *cpu_node,
485 int cpu_node_nentries, lgrp_plat_probe_mem_config_t *probe_mem_config,
486 lgrp_plat_latency_stats_t *lat_stats, lgrp_plat_probe_stats_t *probe_stats);
487
488 static int lgrp_plat_process_cpu_apicids(cpu_node_map_t *cpu_node);
489
490 static int lgrp_plat_process_slit(ACPI_TABLE_SLIT *tp,
491 node_domain_map_t *node_domain, uint_t node_cnt,
492 memnode_phys_addr_map_t *memnode_info,
493 lgrp_plat_latency_stats_t *lat_stats);
494
495 static int lgrp_plat_process_sli(uint32_t domain, uchar_t *sli_info,
496 uint32_t sli_cnt, node_domain_map_t *node_domain, uint_t node_cnt,
497 lgrp_plat_latency_stats_t *lat_stats);
498
499 static int lgrp_plat_process_srat(ACPI_TABLE_SRAT *tp, ACPI_TABLE_MSCT *mp,
500 uint32_t *prox_domain_min, node_domain_map_t *node_domain,
501 cpu_node_map_t *cpu_node, int cpu_count,
502 memnode_phys_addr_map_t *memnode_info);
503
504 static void lgrp_plat_release_bootstrap(void);
505
506 static int lgrp_plat_srat_domains(ACPI_TABLE_SRAT *tp,
507 uint32_t *prox_domain_min);
508
509 static int lgrp_plat_msct_domains(ACPI_TABLE_MSCT *tp,
510 uint32_t *prox_domain_min);
511
512 static void lgrp_plat_2level_setup(lgrp_plat_latency_stats_t *lat_stats);
513
514 static void opt_get_numa_config(uint_t *node_cnt, int *mem_intrlv,
515 memnode_phys_addr_map_t *memnode_info);
516
517 static hrtime_t opt_probe_vendor(int dest_node, int nreads);
518
519
520 /*
521 * PLATFORM INTERFACE ROUTINES
522 */
523
524 /*
525 * Configure memory nodes for machines with more than one node (ie NUMA)
526 */
527 void
528 plat_build_mem_nodes(struct memlist *list)
529 {
530 pfn_t cur_start; /* start addr of subrange */
531 pfn_t cur_end; /* end addr of subrange */
532 pfn_t start; /* start addr of whole range */
533 pfn_t end; /* end addr of whole range */
534 pgcnt_t endcnt; /* pages to sacrifice */
535
536 /*
537 * Boot install lists are arranged <addr, len>, ...
538 */
539 while (list) {
540 int node;
541
542 start = list->ml_address >> PAGESHIFT;
543 end = (list->ml_address + list->ml_size - 1) >> PAGESHIFT;
544
545 if (start > physmax) {
546 list = list->ml_next;
547 continue;
548 }
549 if (end > physmax)
550 end = physmax;
551
552 /*
553 * When there is only one memnode, just add memory to memnode
554 */
555 if (max_mem_nodes == 1) {
556 mem_node_add_slice(start, end);
557 list = list->ml_next;
558 continue;
559 }
560
561 /*
562 * mem_node_add_slice() expects to get a memory range that
563 * is within one memnode, so need to split any memory range
564 * that spans multiple memnodes into subranges that are each
565 * contained within one memnode when feeding them to
566 * mem_node_add_slice()
567 */
568 cur_start = start;
569 do {
570 node = plat_pfn_to_mem_node(cur_start);
571
572 /*
573 * Panic if DRAM address map registers or SRAT say
574 * memory in node doesn't exist or address from
575 * boot installed memory list entry isn't in this node.
576 * This shouldn't happen and rest of code can't deal
577 * with this if it does.
578 */
579 if (node < 0 || node >= lgrp_plat_max_mem_node ||
580 !lgrp_plat_memnode_info[node].exists ||
581 cur_start < lgrp_plat_memnode_info[node].start ||
582 cur_start > lgrp_plat_memnode_info[node].end) {
583 cmn_err(CE_PANIC, "Don't know which memnode "
584 "to add installed memory address 0x%lx\n",
585 cur_start);
586 }
587
588 /*
589 * End of current subrange should not span memnodes
590 */
591 cur_end = end;
592 endcnt = 0;
593 if (lgrp_plat_memnode_info[node].exists &&
594 cur_end > lgrp_plat_memnode_info[node].end) {
595 cur_end = lgrp_plat_memnode_info[node].end;
596 if (mnode_xwa > 1) {
597 /*
598 * sacrifice the last page in each
599 * node to eliminate large pages
600 * that span more than 1 memory node.
601 */
602 endcnt = 1;
603 physinstalled--;
604 }
605 }
606
607 mem_node_add_slice(cur_start, cur_end - endcnt);
608
609 /*
610 * Next subrange starts after end of current one
611 */
612 cur_start = cur_end + 1;
613 } while (cur_end < end);
614
615 list = list->ml_next;
616 }
617 mem_node_physalign = 0;
618 mem_node_pfn_shift = 0;
619 }
620
621
622 /*
623 * plat_mnode_xcheck: checks the node memory ranges to see if there is a pfncnt
624 * range of pages aligned on pfncnt that crosses a node boundary. Returns 1 if
625 * a crossing is found and returns 0 otherwise.
626 */
627 int
628 plat_mnode_xcheck(pfn_t pfncnt)
629 {
630 int node, prevnode = -1, basenode;
631 pfn_t ea, sa;
632
633 for (node = 0; node < lgrp_plat_max_mem_node; node++) {
634
635 if (lgrp_plat_memnode_info[node].exists == 0)
636 continue;
637
638 if (prevnode == -1) {
639 prevnode = node;
640 basenode = node;
641 continue;
642 }
643
644 /* assume x86 node pfn ranges are in increasing order */
645 ASSERT(lgrp_plat_memnode_info[node].start >
646 lgrp_plat_memnode_info[prevnode].end);
647
648 /*
649 * continue if the starting address of node is not contiguous
650 * with the previous node.
651 */
652
653 if (lgrp_plat_memnode_info[node].start !=
654 (lgrp_plat_memnode_info[prevnode].end + 1)) {
655 basenode = node;
656 prevnode = node;
657 continue;
658 }
659
660 /* check if the starting address of node is pfncnt aligned */
661 if ((lgrp_plat_memnode_info[node].start & (pfncnt - 1)) != 0) {
662
663 /*
664 * at this point, node starts at an unaligned boundary
665 * and is contiguous with the previous node(s) to
666 * basenode. Check if there is an aligned contiguous
667 * range of length pfncnt that crosses this boundary.
668 */
669
670 sa = P2ALIGN(lgrp_plat_memnode_info[prevnode].end,
671 pfncnt);
672 ea = P2ROUNDUP((lgrp_plat_memnode_info[node].start),
673 pfncnt);
674
675 ASSERT((ea - sa) == pfncnt);
676 if (sa >= lgrp_plat_memnode_info[basenode].start &&
677 ea <= (lgrp_plat_memnode_info[node].end + 1)) {
678 /*
679 * large page found to cross mnode boundary.
680 * Return Failure if workaround not enabled.
681 */
682 if (mnode_xwa == 0)
683 return (1);
684 mnode_xwa++;
685 }
686 }
687 prevnode = node;
688 }
689 return (0);
690 }
691
692
693 lgrp_handle_t
694 plat_mem_node_to_lgrphand(int mnode)
695 {
696 if (max_mem_nodes == 1)
697 return (LGRP_DEFAULT_HANDLE);
698
699 ASSERT(0 <= mnode && mnode < lgrp_plat_max_mem_node);
700
701 return ((lgrp_handle_t)(lgrp_plat_memnode_info[mnode].lgrphand));
702 }
703
704 int
705 plat_pfn_to_mem_node(pfn_t pfn)
706 {
707 int node;
708
709 if (max_mem_nodes == 1)
710 return (0);
711
712 for (node = 0; node < lgrp_plat_max_mem_node; node++) {
713 /*
714 * Skip nodes with no memory
715 */
716 if (!lgrp_plat_memnode_info[node].exists)
717 continue;
718
719 membar_consumer();
720 if (pfn >= lgrp_plat_memnode_info[node].start &&
721 pfn <= lgrp_plat_memnode_info[node].end)
722 return (node);
723 }
724
725 /*
726 * Didn't find memnode where this PFN lives, which should never happen
727 */
728 ASSERT(node < lgrp_plat_max_mem_node);
729 return (-1);
730 }
731
732
733 /*
734 * LGROUP PLATFORM INTERFACE ROUTINES
735 */
736
737 /*
738 * Allocate additional space for an lgroup.
739 */
740 lgrp_t *
741 lgrp_plat_alloc(lgrp_id_t lgrpid)
742 {
743 lgrp_t *lgrp;
744
745 lgrp = &lgrp_space[nlgrps_alloc++];
746 if (lgrpid >= NLGRP || nlgrps_alloc > NLGRP)
747 return (NULL);
748 return (lgrp);
749 }
750
751
752 /*
753 * Platform handling for (re)configuration changes
754 *
755 * Mechanism to protect lgrp_plat_cpu_node[] at CPU hotplug:
756 * 1) Use cpu_lock to synchronize between lgrp_plat_config() and
757 * lgrp_plat_cpu_to_hand().
758 * 2) Disable latency probing logic by making sure that the flag
759 * LGRP_PLAT_PROBE_ENABLE is cleared.
760 *
761 * Mechanism to protect lgrp_plat_memnode_info[] at memory hotplug:
762 * 1) Only inserts into lgrp_plat_memnode_info at memory hotplug, no removal.
763 * 2) Only expansion to existing entries, no shrinking.
764 * 3) On writing side, DR framework ensures that lgrp_plat_config() is called
765 * in single-threaded context. And membar_producer() is used to ensure that
766 * all changes are visible to other CPUs before setting the "exists" flag.
767 * 4) On reading side, membar_consumer() after checking the "exists" flag
768 * ensures that right values are retrieved.
769 *
770 * Mechanism to protect lgrp_plat_node_domain[] at hotplug:
771 * 1) Only insertion into lgrp_plat_node_domain at hotplug, no removal.
772 * 2) On writing side, it's single-threaded and membar_producer() is used to
773 * ensure all changes are visible to other CPUs before setting the "exists"
774 * flag.
775 * 3) On reading side, membar_consumer() after checking the "exists" flag
776 * ensures that right values are retrieved.
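 *
 * Illustrative sketch (not a verbatim excerpt) of this publication protocol
 * for a hypothetical entry "e" in one of these arrays:
 *
 *   writer (DR, single-threaded):          reader (any CPU):
 *       e->start = new_start;                  if (e->exists) {
 *       e->end = new_end;                          membar_consumer();
 *       membar_producer();                         use e->start, e->end;
 *       e->exists = 1;                         }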
777 */
778 void
779 lgrp_plat_config(lgrp_config_flag_t flag, uintptr_t arg)
780 {
781 #ifdef __xpv
782 _NOTE(ARGUNUSED(flag, arg));
783 #else
784 int rc, node;
785 cpu_t *cp;
786 void *hdl = NULL;
787 uchar_t *sliptr = NULL;
788 uint32_t domain, apicid, slicnt = 0;
789 update_membounds_t *mp;
790
791 extern int acpidev_dr_get_cpu_numa_info(cpu_t *, void **, uint32_t *,
792 uint32_t *, uint32_t *, uchar_t **);
793 extern void acpidev_dr_free_cpu_numa_info(void *);
794
795 /*
796 * This interface is used to support CPU/memory DR operations.
797 * Don't bother here if it's still during boot or only one lgrp node
798 * is supported.
799 */
800 if (!lgrp_topo_initialized || lgrp_plat_node_cnt == 1)
801 return;
802
803 switch (flag) {
804 case LGRP_CONFIG_CPU_ADD:
805 cp = (cpu_t *)arg;
806 ASSERT(cp != NULL);
807 ASSERT(MUTEX_HELD(&cpu_lock));
808
809 /* Check whether CPU already exists. */
810 ASSERT(!lgrp_plat_cpu_node[cp->cpu_id].exists);
811 if (lgrp_plat_cpu_node[cp->cpu_id].exists) {
812 cmn_err(CE_WARN,
813 "!lgrp: CPU(%d) already exists in cpu_node map.",
814 cp->cpu_id);
815 break;
816 }
817
818 /* Query CPU lgrp information. */
819 rc = acpidev_dr_get_cpu_numa_info(cp, &hdl, &apicid, &domain,
820 &slicnt, &sliptr);
821 ASSERT(rc == 0);
822 if (rc != 0) {
823 cmn_err(CE_WARN,
824 "!lgrp: failed to query lgrp info for CPU(%d).",
825 cp->cpu_id);
826 break;
827 }
828
829 /* Update node to proximity domain mapping */
830 node = lgrp_plat_domain_to_node(lgrp_plat_node_domain,
831 lgrp_plat_node_cnt, domain);
832 if (node == -1) {
833 node = lgrp_plat_node_domain_update(
834 lgrp_plat_node_domain, lgrp_plat_node_cnt, domain);
835 ASSERT(node != -1);
836 if (node == -1) {
837 acpidev_dr_free_cpu_numa_info(hdl);
838 cmn_err(CE_WARN, "!lgrp: failed to update "
839 "node_domain map for domain(%u).", domain);
840 break;
841 }
842 }
843
844 /* Update latency information among lgrps. */
845 if (slicnt != 0 && sliptr != NULL) {
846 if (lgrp_plat_process_sli(domain, sliptr, slicnt,
847 lgrp_plat_node_domain, lgrp_plat_node_cnt,
848 &lgrp_plat_lat_stats) != 0) {
849 cmn_err(CE_WARN, "!lgrp: failed to update "
850 "latency information for domain (%u).",
851 domain);
852 }
853 }
854
855 /* Update CPU to node mapping. */
856 lgrp_plat_cpu_node[cp->cpu_id].prox_domain = domain;
857 lgrp_plat_cpu_node[cp->cpu_id].node = node;
858 lgrp_plat_cpu_node[cp->cpu_id].apicid = apicid;
859 lgrp_plat_cpu_node[cp->cpu_id].exists = 1;
860 lgrp_plat_apic_ncpus++;
861
862 acpidev_dr_free_cpu_numa_info(hdl);
863 break;
864
865 case LGRP_CONFIG_CPU_DEL:
866 cp = (cpu_t *)arg;
867 ASSERT(cp != NULL);
868 ASSERT(MUTEX_HELD(&cpu_lock));
869
870 /* Check whether CPU exists. */
871 ASSERT(lgrp_plat_cpu_node[cp->cpu_id].exists);
872 if (!lgrp_plat_cpu_node[cp->cpu_id].exists) {
873 cmn_err(CE_WARN,
874 "!lgrp: CPU(%d) doesn't exist in cpu_node map.",
875 cp->cpu_id);
876 break;
877 }
878
879 /* Query CPU lgrp information. */
880 rc = acpidev_dr_get_cpu_numa_info(cp, &hdl, &apicid, &domain,
881 NULL, NULL);
882 ASSERT(rc == 0);
883 if (rc != 0) {
884 cmn_err(CE_WARN,
885 "!lgrp: failed to query lgrp info for CPU(%d).",
886 cp->cpu_id);
887 break;
888 }
889
890 /* Update map. */
891 ASSERT(lgrp_plat_cpu_node[cp->cpu_id].apicid == apicid);
892 ASSERT(lgrp_plat_cpu_node[cp->cpu_id].prox_domain == domain);
893 lgrp_plat_cpu_node[cp->cpu_id].exists = 0;
894 lgrp_plat_cpu_node[cp->cpu_id].apicid = UINT32_MAX;
895 lgrp_plat_cpu_node[cp->cpu_id].prox_domain = UINT32_MAX;
896 lgrp_plat_cpu_node[cp->cpu_id].node = UINT_MAX;
897 lgrp_plat_apic_ncpus--;
898
899 acpidev_dr_free_cpu_numa_info(hdl);
900 break;
901
902 case LGRP_CONFIG_MEM_ADD:
903 mp = (update_membounds_t *)arg;
904 ASSERT(mp != NULL);
905
906 /* Update latency information among lgrps. */
907 if (mp->u_sli_cnt != 0 && mp->u_sli_ptr != NULL) {
908 if (lgrp_plat_process_sli(mp->u_domain,
909 mp->u_sli_ptr, mp->u_sli_cnt,
910 lgrp_plat_node_domain, lgrp_plat_node_cnt,
911 &lgrp_plat_lat_stats) != 0) {
912 cmn_err(CE_WARN, "!lgrp: failed to update "
913 "latency information for domain (%u).",
914 mp->u_domain);
915 }
916 }
917
918 if (lgrp_plat_memnode_info_update(lgrp_plat_node_domain,
919 lgrp_plat_node_cnt, lgrp_plat_memnode_info, max_mem_nodes,
920 mp->u_base, mp->u_base + mp->u_length,
921 mp->u_domain, mp->u_device_id) < 0) {
922 cmn_err(CE_WARN,
923 "!lgrp: failed to update latency information for "
924 "memory (0x%" PRIx64 " - 0x%" PRIx64 ").",
925 mp->u_base, mp->u_base + mp->u_length);
926 }
927 break;
928
929 default:
930 break;
931 }
932 #endif /* __xpv */
933 }
934
935
936 /*
937 * Return the platform handle for the lgroup containing the given CPU
938 */
939 lgrp_handle_t
940 lgrp_plat_cpu_to_hand(processorid_t id)
941 {
942 lgrp_handle_t hand;
943
944 ASSERT(!lgrp_initialized || MUTEX_HELD(&cpu_lock));
945
946 if (lgrp_plat_node_cnt == 1)
947 return (LGRP_DEFAULT_HANDLE);
948
949 hand = (lgrp_handle_t)lgrp_plat_cpu_to_node(cpu[id],
950 lgrp_plat_cpu_node, lgrp_plat_cpu_node_nentries);
951
952 ASSERT(hand != (lgrp_handle_t)-1);
953 if (hand == (lgrp_handle_t)-1)
954 return (LGRP_NULL_HANDLE);
955
956 return (hand);
957 }
958
959
960 /*
961 * Platform-specific initialization of lgroups
962 */
963 void
964 lgrp_plat_init(lgrp_init_stages_t stage)
965 {
966 #if defined(__xpv)
967 #else /* __xpv */
968 u_longlong_t value;
969 #endif /* __xpv */
970
971 switch (stage) {
972 case LGRP_INIT_STAGE1:
973 #if defined(__xpv)
974 /*
975 * XXPV For now, the hypervisor treats all memory equally.
976 */
977 lgrp_plat_node_cnt = max_mem_nodes = 1;
978 #else /* __xpv */
979
980 /*
981 * Get boot property for lgroup topology height limit
982 */
983 if (bootprop_getval(BP_LGRP_TOPO_LEVELS, &value) == 0)
984 (void) lgrp_topo_ht_limit_set((int)value);
985
986 /*
987 * Get boot property for enabling/disabling SRAT
988 */
989 if (bootprop_getval(BP_LGRP_SRAT_ENABLE, &value) == 0)
990 lgrp_plat_srat_enable = (int)value;
991
992 /*
993 * Get boot property for enabling/disabling SLIT
994 */
995 if (bootprop_getval(BP_LGRP_SLIT_ENABLE, &value) == 0)
996 lgrp_plat_slit_enable = (int)value;
997
998 /*
999 * Get boot property for enabling/disabling MSCT
1000 */
1001 if (bootprop_getval(BP_LGRP_MSCT_ENABLE, &value) == 0)
1002 lgrp_plat_msct_enable = (int)value;
1003
1004 /*
1005 * Initialize as a UMA machine
1006 */
1007 if (lgrp_topo_ht_limit() == 1) {
1008 lgrp_plat_node_cnt = max_mem_nodes = 1;
1009 lgrp_plat_max_mem_node = 1;
1010 return;
1011 }
1012
1013 lgrp_plat_get_numa_config();
1014
1015 /*
1016 * Each lgrp node needs MAX_MEM_NODES_PER_LGROUP memnodes
1017 * to support memory DR operations if memory DR is enabled.
1018 */
1019 lgrp_plat_max_mem_node = lgrp_plat_node_cnt;
1020 if (plat_dr_support_memory() && lgrp_plat_node_cnt != 1) {
1021 max_mem_nodes = MAX_MEM_NODES_PER_LGROUP *
1022 lgrp_plat_node_cnt;
1023 ASSERT(max_mem_nodes <= MAX_MEM_NODES);
1024 }
1025 #endif /* __xpv */
1026 break;
1027
1028 case LGRP_INIT_STAGE3:
1029 lgrp_plat_probe();
1030 lgrp_plat_release_bootstrap();
1031 break;
1032
1033 case LGRP_INIT_STAGE4:
1034 lgrp_plat_main_init();
1035 break;
1036
1037 default:
1038 break;
1039 }
1040 }
1041
1042
1043 /*
1044 * Return latency between "from" and "to" lgroups
1045 *
1046 * This latency number can only be used for relative comparison
1047 * between lgroups on the running system, cannot be used across platforms,
1048 * and may not reflect the actual latency. It is platform and implementation
1049 * specific, so platform gets to decide its value. It would be nice if the
1050 * number was at least proportional to make comparisons more meaningful though.
1051 */
1052 int
1053 lgrp_plat_latency(lgrp_handle_t from, lgrp_handle_t to)
1054 {
1055 lgrp_handle_t src, dest;
1056 int node;
1057
1058 if (max_mem_nodes == 1)
1059 return (0);
1060
1061 /*
1062 * Return max latency for root lgroup
1063 */
1064 if (from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE)
1065 return (lgrp_plat_lat_stats.latency_max);
1066
1067 src = from;
1068 dest = to;
1069
1070 /*
1071 * Return 0 for nodes (lgroup platform handles) out of range
1072 */
1073 if (src >= MAX_NODES || dest >= MAX_NODES)
1074 return (0);
1075
1076 /*
1077 * Probe from current CPU if its lgroup latencies haven't been set yet
1078 * and we are trying to get latency from current CPU to some node.
1079 * Avoid probing if CPU/memory DR is enabled.
1080 */
1081 if (lgrp_plat_lat_stats.latencies[src][src] == 0) {
1082 /*
1083 * Latency information should be updated by lgrp_plat_config()
1084 * for DR operations. Something is wrong if we reach here.
1085 * For safety, flatten lgrp topology to two levels.
1086 */
1087 if (plat_dr_support_cpu() || plat_dr_support_memory()) {
1088 ASSERT(lgrp_plat_lat_stats.latencies[src][src]);
1089 cmn_err(CE_WARN,
1090 "lgrp: failed to get latency information, "
1091 "fall back to two-level topology.");
1092 lgrp_plat_2level_setup(&lgrp_plat_lat_stats);
1093 } else {
1094 node = lgrp_plat_cpu_to_node(CPU, lgrp_plat_cpu_node,
1095 lgrp_plat_cpu_node_nentries);
1096 ASSERT(node >= 0 && node < lgrp_plat_node_cnt);
1097 if (node == src)
1098 lgrp_plat_probe();
1099 }
1100 }
1101
1102 return (lgrp_plat_lat_stats.latencies[src][dest]);
1103 }
1104
1105
1106 /*
1107 * Return the maximum number of lgrps supported by the platform.
1108 * Before lgrp topology is known it returns an estimate based on the number of
1109 * nodes. Once topology is known it returns:
1110 * 1) the actual maximum number of lgrps created if CPU/memory DR operations
1111 * are not supported.
1112 * 2) the maximum possible number of lgrps if CPU/memory DR operations are
1113 * supported.
1114 */
1115 int
1116 lgrp_plat_max_lgrps(void)
1117 {
1118 if (!lgrp_topo_initialized || plat_dr_support_cpu() ||
1119 plat_dr_support_memory()) {
1120 return (lgrp_plat_node_cnt * (lgrp_plat_node_cnt - 1) + 1);
1121 } else {
1122 return (lgrp_alloc_max + 1);
1123 }
1124 }
1125
1126
1127 /*
1128 * Count number of memory pages (_t) based on mnode id (_n) and query type (_q).
1129 */
1130 #define _LGRP_PLAT_MEM_SIZE(_n, _q, _t) \
1131 if (mem_node_config[_n].exists) { \
1132 switch (_q) { \
1133 case LGRP_MEM_SIZE_FREE: \
1134 _t += MNODE_PGCNT(_n); \
1135 break; \
1136 case LGRP_MEM_SIZE_AVAIL: \
1137 _t += mem_node_memlist_pages(_n, phys_avail); \
1138 break; \
1139 case LGRP_MEM_SIZE_INSTALL: \
1140 _t += mem_node_memlist_pages(_n, phys_install); \
1141 break; \
1142 default: \
1143 break; \
1144 } \
1145 }
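
/*
 * For example, lgrp_plat_mem_size() below expands
 * _LGRP_PLAT_MEM_SIZE(mnode, query, npgs) once for the lgroup's boot-time
 * memnode and once for each matching hot-added memnode, accumulating the
 * page count in npgs.
 */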
1146
1147 /*
1148 * Return the number of free pages in an lgroup.
1149 *
1150 * For query of LGRP_MEM_SIZE_FREE, return the number of base pagesize
1151 * pages on freelists. For query of LGRP_MEM_SIZE_AVAIL, return the
1152 * number of allocatable base pagesize pages corresponding to the
1153 * lgroup (e.g. do not include page_t's, BOP_ALLOC()'ed memory, ..)
1154 * For query of LGRP_MEM_SIZE_INSTALL, return the amount of physical
1155 * memory installed, regardless of whether or not it's usable.
1156 */
1157 pgcnt_t
1158 lgrp_plat_mem_size(lgrp_handle_t plathand, lgrp_mem_query_t query)
1159 {
1160 int mnode;
1161 pgcnt_t npgs = (pgcnt_t)0;
1162 extern struct memlist *phys_avail;
1163 extern struct memlist *phys_install;
1164
1165
1166 if (plathand == LGRP_DEFAULT_HANDLE)
1167 return (lgrp_plat_mem_size_default(plathand, query));
1168
1169 if (plathand != LGRP_NULL_HANDLE) {
1170 /* Count memory node present at boot. */
1171 mnode = (int)plathand;
1172 ASSERT(mnode < lgrp_plat_node_cnt);
1173 _LGRP_PLAT_MEM_SIZE(mnode, query, npgs);
1174
1175 /* Count possible hot-added memory nodes. */
1176 for (mnode = lgrp_plat_node_cnt;
1177 mnode < lgrp_plat_max_mem_node; mnode++) {
1178 if (lgrp_plat_memnode_info[mnode].lgrphand == plathand)
1179 _LGRP_PLAT_MEM_SIZE(mnode, query, npgs);
1180 }
1181 }
1182
1183 return (npgs);
1184 }
1185
1186
1187 /*
1188 * Return the platform handle of the lgroup that contains the physical memory
1189 * corresponding to the given page frame number
1190 */
1191 lgrp_handle_t
1192 lgrp_plat_pfn_to_hand(pfn_t pfn)
1193 {
1194 int mnode;
1195
1196 if (max_mem_nodes == 1)
1197 return (LGRP_DEFAULT_HANDLE);
1198
1199 if (pfn > physmax)
1200 return (LGRP_NULL_HANDLE);
1201
1202 mnode = plat_pfn_to_mem_node(pfn);
1203 if (mnode < 0)
1204 return (LGRP_NULL_HANDLE);
1205
1206 return (MEM_NODE_2_LGRPHAND(mnode));
1207 }
1208
1209
1210 /*
1211 * Probe memory in each node from current CPU to determine latency topology
1212 *
1213 * The probing code will probe the vendor ID register on the Northbridge of
1214 * Opteron processors and probe memory for other processors by default.
1215 *
1216 * Since probing is inherently error prone, the code takes laps across all the
1217 * nodes probing from each node to each of the other nodes some number of
1218 * times. Furthermore, each node is probed some number of times before moving
1219 * onto the next one during each lap. The minimum latency gotten between nodes
1220 * is kept as the latency between the nodes.
1221 *
1222 * After all that, the probe times are adjusted by normalizing values that are
1223 * close to each other and local latencies are made the same. Lastly, the
1224 * latencies are verified to make sure that certain conditions are met (eg.
1225 * local < remote, latency(a, b) == latency(b, a), etc.).
1226 *
1227 * If any of the conditions aren't met, the code will export a NUMA
1228 * configuration with the local CPUs and memory given by the SRAT or PCI config
1229 * space registers and one remote memory latency since it can't tell exactly
1230 * how far each node is from each other.
1231 */
1232 void
1233 lgrp_plat_probe(void)
1234 {
1235 int from;
1236 int i;
1237 lgrp_plat_latency_stats_t *lat_stats;
1238 boolean_t probed;
1239 hrtime_t probe_time;
1240 int to;
1241
1242 if (!(lgrp_plat_probe_flags & LGRP_PLAT_PROBE_ENABLE) ||
1243 max_mem_nodes == 1 || lgrp_topo_ht_limit() <= 2)
1244 return;
1245
1246 /* SRAT and SLIT should be enabled if DR operations are enabled. */
1247 if (plat_dr_support_cpu() || plat_dr_support_memory())
1248 return;
1249
1250 /*
1251 * Determine ID of node containing current CPU
1252 */
1253 from = lgrp_plat_cpu_to_node(CPU, lgrp_plat_cpu_node,
1254 lgrp_plat_cpu_node_nentries);
1255 ASSERT(from >= 0 && from < lgrp_plat_node_cnt);
1256 if (srat_ptr && lgrp_plat_srat_enable && !lgrp_plat_srat_error)
1257 ASSERT(lgrp_plat_node_domain[from].exists);
1258
1259 /*
1260 * Don't need to probe if got times already
1261 */
1262 lat_stats = &lgrp_plat_lat_stats;
1263 if (lat_stats->latencies[from][from] != 0)
1264 return;
1265
1266 /*
1267 * Read vendor ID in Northbridge or read and write page(s)
1268 * in each node from current CPU and remember how long it takes,
1269 * so we can build latency topology of machine later.
1270 * This should approximate the memory latency between each node.
1271 */
1272 probed = B_FALSE;
1273 for (i = 0; i < lgrp_plat_probe_nrounds; i++) {
1274 for (to = 0; to < lgrp_plat_node_cnt; to++) {
1275 /*
1276 * Get probe time and skip over any nodes that can't be
1277 * probed yet or don't have memory
1278 */
1279 probe_time = lgrp_plat_probe_time(to,
1280 lgrp_plat_cpu_node, lgrp_plat_cpu_node_nentries,
1281 &lgrp_plat_probe_mem_config, &lgrp_plat_lat_stats,
1282 &lgrp_plat_probe_stats);
1283 if (probe_time == 0)
1284 continue;
1285
1286 probed = B_TRUE;
1287
1288 /*
1289 * Keep lowest probe time as latency between nodes
1290 */
1291 if (lat_stats->latencies[from][to] == 0 ||
1292 probe_time < lat_stats->latencies[from][to])
1293 lat_stats->latencies[from][to] = probe_time;
1294
1295 /*
1296 * Update overall minimum and maximum probe times
1297 * across all nodes
1298 */
1299 if (probe_time < lat_stats->latency_min ||
1300 lat_stats->latency_min == -1)
1301 lat_stats->latency_min = probe_time;
1302 if (probe_time > lat_stats->latency_max)
1303 lat_stats->latency_max = probe_time;
1304 }
1305 }
1306
1307 /*
1308 * Bail out if weren't able to probe any nodes from current CPU
1309 */
1310 if (probed == B_FALSE)
1311 return;
1312
1313 /*
1314 * - Fix up latencies such that local latencies are same,
1315 * latency(i, j) == latency(j, i), etc. (if possible)
1316 *
1317 * - Verify that latencies look ok
1318 *
1319 * - Fallback to just optimizing for local and remote if
1320 * latencies didn't look right
1321 */
1322 lgrp_plat_latency_adjust(lgrp_plat_memnode_info, &lgrp_plat_lat_stats,
1323 &lgrp_plat_probe_stats);
1324 lgrp_plat_probe_stats.probe_error_code =
1325 lgrp_plat_latency_verify(lgrp_plat_memnode_info,
1326 &lgrp_plat_lat_stats);
1327 if (lgrp_plat_probe_stats.probe_error_code)
1328 lgrp_plat_2level_setup(&lgrp_plat_lat_stats);
1329 }
1330
1331
1332 /*
1333 * Return platform handle for root lgroup
1334 */
1335 lgrp_handle_t
1336 lgrp_plat_root_hand(void)
1337 {
1338 return (LGRP_DEFAULT_HANDLE);
1339 }
1340
1341
1342 /*
1343 * INTERNAL ROUTINES
1344 */
1345
1346
1347 /*
1348 * Update CPU to node mapping for given CPU and proximity domain.
1349 * Return values:
1350 * - zero for success
1351 * - positive numbers for warnings
1352 * - negative numbers for errors
1353 */
1354 static int
1355 lgrp_plat_cpu_node_update(node_domain_map_t *node_domain, int node_cnt,
1356 cpu_node_map_t *cpu_node, int nentries, uint32_t apicid, uint32_t domain)
1357 {
1358 uint_t i;
1359 int node;
1360
1361 /*
1362 * Get node number for proximity domain
1363 */
1364 node = lgrp_plat_domain_to_node(node_domain, node_cnt, domain);
1365 if (node == -1) {
1366 node = lgrp_plat_node_domain_update(node_domain, node_cnt,
1367 domain);
1368 if (node == -1)
1369 return (-1);
1370 }
1371
1372 /*
1373 * Search for entry with given APIC ID and fill in its node and
1374 * proximity domain IDs (if they haven't been set already)
1375 */
1376 for (i = 0; i < nentries; i++) {
1377 /*
1378 * Skip nonexistent entries and ones without matching APIC ID
1379 */
1380 if (!cpu_node[i].exists || cpu_node[i].apicid != apicid)
1381 continue;
1382
1383 /*
1384 * Just return if entry completely and correctly filled in
1385 * already
1386 */
1387 if (cpu_node[i].prox_domain == domain &&
1388 cpu_node[i].node == node)
1389 return (1);
1390
1391 /*
1392 * It's invalid to have more than one entry with the same
1393 * local APIC ID in SRAT table.
1394 */
1395 if (cpu_node[i].node != UINT_MAX)
1396 return (-2);
1397
1398 /*
1399 * Fill in node and proximity domain IDs
1400 */
1401 cpu_node[i].prox_domain = domain;
1402 cpu_node[i].node = node;
1403
1404 return (0);
1405 }
1406
1407 /*
1408 * It's possible that an apicid doesn't exist in the cpu_node map because
1409 * the user limited the number of CPUs powered on at boot by specifying the
1410 * boot_ncpus kernel option.
1411 */
1412 return (2);
1413 }
1414
1415
1416 /*
1417 * Get node ID for given CPU
1418 */
1419 static int
1420 lgrp_plat_cpu_to_node(cpu_t *cp, cpu_node_map_t *cpu_node,
1421 int cpu_node_nentries)
1422 {
1423 processorid_t cpuid;
1424
1425 if (cp == NULL)
1426 return (-1);
1427
1428 cpuid = cp->cpu_id;
1429 if (cpuid < 0 || cpuid >= max_ncpus)
1430 return (-1);
1431
1432 /*
1433 * SRAT doesn't exist, isn't enabled, or there was an error processing
1434 * it, so return node ID for Opteron and -1 otherwise.
1435 */
1436 if (srat_ptr == NULL || !lgrp_plat_srat_enable ||
1437 lgrp_plat_srat_error) {
1438 if (is_opteron())
1439 return (pg_plat_hw_instance_id(cp, PGHW_PROCNODE));
1440 return (-1);
1441 }
1442
1443 /*
1444 * Return -1 when CPU to node ID mapping entry doesn't exist for given
1445 * CPU
1446 */
1447 if (cpuid >= cpu_node_nentries || !cpu_node[cpuid].exists)
1448 return (-1);
1449
1450 return (cpu_node[cpuid].node);
1451 }
1452
1453
1454 /*
1455 * Return node number for given proximity domain/system locality
1456 */
1457 static int
1458 lgrp_plat_domain_to_node(node_domain_map_t *node_domain, int node_cnt,
1459 uint32_t domain)
1460 {
1461 uint_t node;
1462 uint_t start;
1463
1464 /*
1465 * Hash proximity domain ID into node to domain mapping table (array),
1466 * search for entry with matching proximity domain ID, and return index
1467 * of matching entry as node ID.
1468 */
1469 node = start = NODE_DOMAIN_HASH(domain, node_cnt);
1470 do {
1471 if (node_domain[node].exists) {
1472 membar_consumer();
1473 if (node_domain[node].prox_domain == domain)
1474 return (node);
1475 }
1476 node = (node + 1) % node_cnt;
1477 } while (node != start);
1478 return (-1);
1479 }
1480
1481
1482 /*
1483 * Get NUMA configuration of machine
1484 */
1485 static void
1486 lgrp_plat_get_numa_config(void)
1487 {
1488 uint_t probe_op;
1489
1490 /*
1491 * Read boot property with CPU to APIC ID mapping table/array to
1492 * determine number of CPUs
1493 */
1494 lgrp_plat_apic_ncpus = lgrp_plat_process_cpu_apicids(NULL);
1495
1496 /*
1497 * Determine which CPUs and memory are local to each other and number
1498 * of NUMA nodes by reading ACPI System Resource Affinity Table (SRAT)
1499 */
1500 if (lgrp_plat_apic_ncpus > 0) {
1501 int retval;
1502
1503 /* Reserve enough resources if CPU DR is enabled. */
1504 if (plat_dr_support_cpu() && max_ncpus > lgrp_plat_apic_ncpus)
1505 lgrp_plat_cpu_node_nentries = max_ncpus;
1506 else
1507 lgrp_plat_cpu_node_nentries = lgrp_plat_apic_ncpus;
1508
1509 /*
1510 * Temporarily allocate boot memory to use for CPU to node
1511 * mapping since kernel memory allocator isn't alive yet
1512 */
1513 lgrp_plat_cpu_node = (cpu_node_map_t *)BOP_ALLOC(bootops,
1514 NULL, lgrp_plat_cpu_node_nentries * sizeof (cpu_node_map_t),
1515 sizeof (int));
1516
1517 ASSERT(lgrp_plat_cpu_node != NULL);
1518 if (lgrp_plat_cpu_node) {
1519 bzero(lgrp_plat_cpu_node, lgrp_plat_cpu_node_nentries *
1520 sizeof (cpu_node_map_t));
1521 } else {
1522 lgrp_plat_cpu_node_nentries = 0;
1523 }
1524
1525 /*
1526 * Fill in CPU to node ID mapping table with APIC ID for each
1527 * CPU
1528 */
1529 (void) lgrp_plat_process_cpu_apicids(lgrp_plat_cpu_node);
1530
1531 retval = lgrp_plat_process_srat(srat_ptr, msct_ptr,
1532 &lgrp_plat_prox_domain_min,
1533 lgrp_plat_node_domain, lgrp_plat_cpu_node,
1534 lgrp_plat_apic_ncpus, lgrp_plat_memnode_info);
1535 if (retval <= 0) {
1536 lgrp_plat_srat_error = retval;
1537 lgrp_plat_node_cnt = 1;
1538 } else {
1539 lgrp_plat_srat_error = 0;
1540 lgrp_plat_node_cnt = retval;
1541 }
1542 }
1543
1544 /*
1545 * Try to use PCI config space registers on Opteron if there's an error
1546 * processing CPU to APIC ID mapping or SRAT
1547 */
1548 if ((lgrp_plat_apic_ncpus <= 0 || lgrp_plat_srat_error != 0) &&
1549 is_opteron())
1550 opt_get_numa_config(&lgrp_plat_node_cnt, &lgrp_plat_mem_intrlv,
1551 lgrp_plat_memnode_info);
1552
1553 /*
1554 * Don't bother to set up the system for multiple lgroups and only use one
1555 * memory node when memory is interleaved between any nodes or there is
1556 * only one NUMA node
1557 */
1558 if (lgrp_plat_mem_intrlv || lgrp_plat_node_cnt == 1) {
1559 lgrp_plat_node_cnt = max_mem_nodes = 1;
1560 (void) lgrp_topo_ht_limit_set(1);
1561 return;
1562 }
1563
1564 /*
1565 * Leaf lgroups on x86/x64 architectures contain one physical
1566 * processor chip. Tune lgrp_expand_proc_thresh and
1567 * lgrp_expand_proc_diff so that lgrp_choose() will spread
1568 * things out aggressively.
1569 */
1570 lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX / 2;
1571 lgrp_expand_proc_diff = 0;
1572
1573 /*
1574 * There should be one memnode (physical page free list(s)) for
1575 * each node if memory DR is disabled.
1576 */
1577 max_mem_nodes = lgrp_plat_node_cnt;
1578
1579 /*
1580 * Initialize min and max latency before reading SLIT or probing
1581 */
1582 lgrp_plat_lat_stats.latency_min = -1;
1583 lgrp_plat_lat_stats.latency_max = 0;
1584
1585 /*
1586 * Determine how far each NUMA node is from each other by
1587 * reading ACPI System Locality Information Table (SLIT) if it
1588 * exists
1589 */
1590 lgrp_plat_slit_error = lgrp_plat_process_slit(slit_ptr,
1591 lgrp_plat_node_domain, lgrp_plat_node_cnt, lgrp_plat_memnode_info,
1592 &lgrp_plat_lat_stats);
1593
1594 /*
1595 * Disable support of CPU/memory DR operations if multiple locality
1596 * domains exist in system and either of following is true.
1597 * 1) Failed to process SLIT table.
1598 * 2) Latency probing is enabled by user.
1599 */
1600 if (lgrp_plat_node_cnt > 1 &&
1601 (plat_dr_support_cpu() || plat_dr_support_memory())) {
1602 if (!lgrp_plat_slit_enable || lgrp_plat_slit_error != 0 ||
1603 !lgrp_plat_srat_enable || lgrp_plat_srat_error != 0 ||
1604 lgrp_plat_apic_ncpus <= 0) {
1605 cmn_err(CE_CONT,
1606 "?lgrp: failed to process ACPI SRAT/SLIT table, "
1607 "disable support of CPU/memory DR operations.");
1608 plat_dr_disable_cpu();
1609 plat_dr_disable_memory();
1610 } else if (lgrp_plat_probe_flags & LGRP_PLAT_PROBE_ENABLE) {
1611 cmn_err(CE_CONT,
1612 "?lgrp: latency probing enabled by user, "
1613 "disable support of CPU/memory DR operations.");
1614 plat_dr_disable_cpu();
1615 plat_dr_disable_memory();
1616 }
1617 }
1618
1619 /* Done if succeeded to process SLIT table. */
1620 if (lgrp_plat_slit_error == 0)
1621 return;
1622
1623 /*
1624 * Probe to determine latency between NUMA nodes when SLIT
1625 * doesn't exist or make sense
1626 */
1627 lgrp_plat_probe_flags |= LGRP_PLAT_PROBE_ENABLE;
1628
1629 /*
1630 * Specify whether to probe using vendor ID register or page copy
1631 * if it hasn't been specified already or is overspecified
1632 */
1633 probe_op = lgrp_plat_probe_flags &
1634 (LGRP_PLAT_PROBE_PGCPY|LGRP_PLAT_PROBE_VENDOR);
1635
1636 if (probe_op == 0 ||
1637 probe_op == (LGRP_PLAT_PROBE_PGCPY|LGRP_PLAT_PROBE_VENDOR)) {
1638 lgrp_plat_probe_flags &=
1639 ~(LGRP_PLAT_PROBE_PGCPY|LGRP_PLAT_PROBE_VENDOR);
1640 if (is_opteron())
1641 lgrp_plat_probe_flags |=
1642 LGRP_PLAT_PROBE_VENDOR;
1643 else
1644 lgrp_plat_probe_flags |= LGRP_PLAT_PROBE_PGCPY;
1645 }
1646
1647 /*
1648 * Probing errors can mess up the lgroup topology and
1649 * force us to fall back to a 2 level lgroup topology.
1650 * Here we bound how tall the lgroup topology can grow
1651 * in hopes of avoiding any anomalies in probing from
1652 * messing up the lgroup topology by limiting the
1653 * accuracy of the latency topology.
1654 *
1655 * Assume that nodes will at least be configured in a
1656 * ring, so limit height of lgroup topology to be less
1657 * than number of nodes on a system with 4 or more
1658 * nodes
1659 */
1660 if (lgrp_plat_node_cnt >= 4 && lgrp_topo_ht_limit() ==
1661 lgrp_topo_ht_limit_default())
1662 (void) lgrp_topo_ht_limit_set(lgrp_plat_node_cnt - 1);
1663 }
1664
1665
1666 /*
1667 * Latencies must be within 1/(2**LGRP_LAT_TOLERANCE_SHIFT) of each other to
1668 * be considered the same
1669 */
1670 #define LGRP_LAT_TOLERANCE_SHIFT 4
1671
1672 int lgrp_plat_probe_lt_shift = LGRP_LAT_TOLERANCE_SHIFT;
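
/*
 * Worked example: with the default shift of 4, two latencies t1 > t2 are
 * within tolerance when t1 - t2 <= t2 >> 4, i.e. when they differ by no
 * more than 1/16 (6.25%) of the smaller value. Latencies of 100 and 105
 * (in arbitrary time units) would be normalized to the same value by
 * lgrp_plat_latency_adjust() below; 100 and 110 would not.
 */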
1673
1674
1675 /*
1676 * Adjust latencies between nodes to be symmetric, normalize latencies between
1677 * any nodes that are within some tolerance to be same, and make local
1678 * latencies be same
1679 */
1680 static void
1681 lgrp_plat_latency_adjust(memnode_phys_addr_map_t *memnode_info,
1682 lgrp_plat_latency_stats_t *lat_stats, lgrp_plat_probe_stats_t *probe_stats)
1683 {
1684 int i;
1685 int j;
1686 int k;
1687 int l;
1688 u_longlong_t max;
1689 u_longlong_t min;
1690 u_longlong_t t;
1691 u_longlong_t t1;
1692 u_longlong_t t2;
1693 const lgrp_config_flag_t cflag = LGRP_CONFIG_LAT_CHANGE_ALL;
1694 int lat_corrected[MAX_NODES][MAX_NODES];
1695
1696 t = 0;
1697 /*
1698 * Nothing to do when this is a UMA machine or we don't have the args needed
1699 */
1700 if (max_mem_nodes == 1)
1701 return;
1702
1703 ASSERT(memnode_info != NULL && lat_stats != NULL &&
1704 probe_stats != NULL);
1705
1706 /*
1707 * Make sure that latencies are symmetric between any two nodes
1708 * (ie. latency(node0, node1) == latency(node1, node0))
1709 */
1710 for (i = 0; i < lgrp_plat_node_cnt; i++) {
1711 if (!memnode_info[i].exists)
1712 continue;
1713
1714 for (j = 0; j < lgrp_plat_node_cnt; j++) {
1715 if (!memnode_info[j].exists)
1716 continue;
1717
1718 t1 = lat_stats->latencies[i][j];
1719 t2 = lat_stats->latencies[j][i];
1720
1721 if (t1 == 0 || t2 == 0 || t1 == t2)
1722 continue;
1723
1724 /*
1725 * Latencies should be same
1726 * - Use minimum of two latencies which should be same
1727 * - Track suspect probe times not within tolerance of
1728 * min value
1729 * - Remember how much values are corrected by
1730 */
1731 if (t1 > t2) {
1732 t = t2;
1733 probe_stats->probe_errors[i][j] += t1 - t2;
1734 if (t1 - t2 > t2 >> lgrp_plat_probe_lt_shift) {
1735 probe_stats->probe_suspect[i][j]++;
1736 probe_stats->probe_suspect[j][i]++;
1737 }
1738 } else if (t2 > t1) {
1739 t = t1;
1740 probe_stats->probe_errors[j][i] += t2 - t1;
1741 if (t2 - t1 > t1 >> lgrp_plat_probe_lt_shift) {
1742 probe_stats->probe_suspect[i][j]++;
1743 probe_stats->probe_suspect[j][i]++;
1744 }
1745 }
1746
1747 lat_stats->latencies[i][j] =
1748 lat_stats->latencies[j][i] = t;
1749 lgrp_config(cflag, t1, t);
1750 lgrp_config(cflag, t2, t);
1751 }
1752 }
1753
1754 /*
1755 * Keep track of which latencies get corrected
1756 */
1757 for (i = 0; i < MAX_NODES; i++)
1758 for (j = 0; j < MAX_NODES; j++)
1759 lat_corrected[i][j] = 0;
1760
1761 /*
1762 * For every two nodes, see whether there is another pair of nodes which
1763 * are about the same distance apart and make the latencies be the same
1764 * if they are close enough together
1765 */
1766 for (i = 0; i < lgrp_plat_node_cnt; i++) {
1767 for (j = 0; j < lgrp_plat_node_cnt; j++) {
1768 if (!memnode_info[j].exists)
1769 continue;
1770 /*
1771 * Pick one pair of nodes (i, j)
1772 * and get latency between them
1773 */
1774 t1 = lat_stats->latencies[i][j];
1775
1776 /*
1777 * Skip this pair of nodes if there isn't a latency
1778 * for it yet
1779 */
1780 if (t1 == 0)
1781 continue;
1782
1783 for (k = 0; k < lgrp_plat_node_cnt; k++) {
1784 for (l = 0; l < lgrp_plat_node_cnt; l++) {
1785 if (!memnode_info[l].exists)
1786 continue;
1787 /*
1788 * Pick another pair of nodes (k, l)
1789 * not same as (i, j) and get latency
1790 * between them
1791 */
1792 if (k == i && l == j)
1793 continue;
1794
1795 t2 = lat_stats->latencies[k][l];
1796
1797 /*
1798 * Skip this pair of nodes if there
1799 * isn't a latency for it yet
1800 */
1801
1802 if (t2 == 0)
1803 continue;
1804
1805 /*
1806 * Skip nodes (k, l) if they already
1807 * have same latency as (i, j) or
1808 * their latency isn't close enough to
1809 * be considered/made the same
1810 */
1811 if (t1 == t2 || (t1 > t2 && t1 - t2 >
1812 t1 >> lgrp_plat_probe_lt_shift) ||
1813 (t2 > t1 && t2 - t1 >
1814 t2 >> lgrp_plat_probe_lt_shift))
1815 continue;
1816
1817 /*
1818 * Make latency(i, j) same as
1819 * latency(k, l), try to use latency
1820 * that has been adjusted already to get
1821 * more consistency (if possible), and
1822 * remember which latencies were
1823 * adjusted for next time
1824 */
1825 if (lat_corrected[i][j]) {
1826 t = t1;
1827 lgrp_config(cflag, t2, t);
1828 t2 = t;
1829 } else if (lat_corrected[k][l]) {
1830 t = t2;
1831 lgrp_config(cflag, t1, t);
1832 t1 = t;
1833 } else {
1834 if (t1 > t2)
1835 t = t2;
1836 else
1837 t = t1;
1838 lgrp_config(cflag, t1, t);
1839 lgrp_config(cflag, t2, t);
1840 t1 = t2 = t;
1841 }
1842
1843 lat_stats->latencies[i][j] =
1844 lat_stats->latencies[k][l] = t;
1845
1846 lat_corrected[i][j] =
1847 lat_corrected[k][l] = 1;
1848 }
1849 }
1850 }
1851 }
1852
1853 /*
1854 * Local latencies should be the same
1855 * - Find min and max local latencies
1856 * - Make all local latencies be minimum
1857 */
1858 min = -1;
1859 max = 0;
1860 for (i = 0; i < lgrp_plat_node_cnt; i++) {
1861 if (!memnode_info[i].exists)
1862 continue;
1863 t = lat_stats->latencies[i][i];
1864 if (t == 0)
1865 continue;
1866 if (min == -1 || t < min)
1867 min = t;
1868 if (t > max)
1869 max = t;
1870 }
1871 if (min != max) {
1872 for (i = 0; i < lgrp_plat_node_cnt; i++) {
1873 int local;
1874
1875 if (!memnode_info[i].exists)
1876 continue;
1877
1878 local = lat_stats->latencies[i][i];
1879 if (local == 0)
1880 continue;
1881
1882 /*
1883 * Track suspect probe times that aren't within
1884 * tolerance of minimum local latency and how much
1885 * probe times are corrected by
1886 */
1887 if (local - min > min >> lgrp_plat_probe_lt_shift)
1888 probe_stats->probe_suspect[i][i]++;
1889
1890 probe_stats->probe_errors[i][i] += local - min;
1891
1892 /*
1893 * Make local latencies be minimum
1894 */
1895 lgrp_config(LGRP_CONFIG_LAT_CHANGE, i, min);
1896 lat_stats->latencies[i][i] = min;
1897 }
1898 }
1899
1900 /*
1901 * Determine max probe time again since we just adjusted latencies
1902 */
1903 lat_stats->latency_max = 0;
1904 for (i = 0; i < lgrp_plat_node_cnt; i++) {
1905 for (j = 0; j < lgrp_plat_node_cnt; j++) {
1906 if (!memnode_info[j].exists)
1907 continue;
1908 t = lat_stats->latencies[i][j];
1909 if (t > lat_stats->latency_max)
1910 lat_stats->latency_max = t;
1911 }
1912 }
1913 }
1914
1915
1916 /*
1917 * Verify the following about latencies between nodes:
1918 *
1919 * - Latencies should be symmetric (ie. latency(a, b) == latency(b, a))
1920 * - Local latencies are the same
1921 * - Local latencies are less than remote latencies
1922 * - Number of latencies seen is reasonable
1923 * - Number of occurrences of a given latency should be more than 1
1924 *
1925 * Returns:
1926 * 0 Success
1927 * -1 Not symmetric
1928 * -2 Local latencies not same
1929 * -3 Local >= remote
1930 */
1931 static int
1932 lgrp_plat_latency_verify(memnode_phys_addr_map_t *memnode_info,
1933 lgrp_plat_latency_stats_t *lat_stats)
1934 {
1935 int i;
1936 int j;
1937 u_longlong_t t1;
1938 u_longlong_t t2;
1939
1940 ASSERT(memnode_info != NULL && lat_stats != NULL);
1941
1942 /*
1943 * Nothing to do when this is a UMA machine, the lgroup topology is
1944 * limited to 2 levels, or there aren't any probe times yet
1945 */
1946 if (max_mem_nodes == 1 || lgrp_topo_levels < 2 ||
1947 lat_stats->latencies[0][0] == 0)
1948 return (0);
1949
1950 /*
1951 * Make sure that latencies are symmetric between any two nodes
1952 * (ie. latency(node0, node1) == latency(node1, node0))
1953 */
1954 for (i = 0; i < lgrp_plat_node_cnt; i++) {
1955 if (!memnode_info[i].exists)
1956 continue;
1957 for (j = 0; j < lgrp_plat_node_cnt; j++) {
1958 if (!memnode_info[j].exists)
1959 continue;
1960 t1 = lat_stats->latencies[i][j];
1961 t2 = lat_stats->latencies[j][i];
1962
1963 if (t1 == 0 || t2 == 0 || t1 == t2)
1964 continue;
1965
1966 return (-1);
1967 }
1968 }
1969
1970 /*
1971 * Local latencies should be same
1972 */
1973 t1 = lat_stats->latencies[0][0];
1974 for (i = 1; i < lgrp_plat_node_cnt; i++) {
1975 if (!memnode_info[i].exists)
1976 continue;
1977
1978 t2 = lat_stats->latencies[i][i];
1979 if (t2 == 0)
1980 continue;
1981
1982 if (t1 == 0) {
1983 t1 = t2;
1984 continue;
1985 }
1986
1987 if (t1 != t2)
1988 return (-2);
1989 }
1990
1991 /*
1992 * Local latencies should be less than remote
1993 */
1994 if (t1) {
1995 for (i = 0; i < lgrp_plat_node_cnt; i++) {
1996 for (j = 0; j < lgrp_plat_node_cnt; j++) {
1997 if (!memnode_info[j].exists)
1998 continue;
1999 t2 = lat_stats->latencies[i][j];
2000 if (i == j || t2 == 0)
2001 continue;
2002
2003 if (t1 >= t2)
2004 return (-3);
2005 }
2006 }
2007 }
2008
2009 return (0);
2010 }
2011
2012
2013 /*
2014 * Platform-specific initialization
2015 */
2016 static void
2017 lgrp_plat_main_init(void)
2018 {
2019 int curnode;
2020 int ht_limit;
2021 int i;
2022
2023 /*
2024 * Print a notice that MPO is disabled when memory is interleaved
2025 * across nodes....We would do this when it is discovered, but can't
2026 * because it happens way too early during boot....
2027 */
2028 if (lgrp_plat_mem_intrlv)
2029 cmn_err(CE_NOTE,
2030 "MPO disabled because memory is interleaved\n");
2031
2032 /*
2033 * Don't bother to do any probing if it is disabled, there is only one
2034 * node, or the height of the lgroup topology is less than or equal to 2
2035 */
2036 ht_limit = lgrp_topo_ht_limit();
2037 if (!(lgrp_plat_probe_flags & LGRP_PLAT_PROBE_ENABLE) ||
2038 max_mem_nodes == 1 || ht_limit <= 2) {
2039 /*
2040 * Setup lgroup latencies for 2 level lgroup topology
2041 * (ie. local and remote only) if they haven't been set yet
2042 */
2043 if (ht_limit == 2 && lgrp_plat_lat_stats.latency_min == -1 &&
2044 lgrp_plat_lat_stats.latency_max == 0)
2045 lgrp_plat_2level_setup(&lgrp_plat_lat_stats);
2046 return;
2047 }
2048
2049 if (lgrp_plat_probe_flags & LGRP_PLAT_PROBE_VENDOR) {
2050 /*
2051 * Should have been able to probe from CPU 0 when it was added
2052 * to lgroup hierarchy, but may not have been able to then
2053 * because it happens so early in boot that gethrtime() hasn't
2054 * been initialized. (:-(
2055 */
2056 curnode = lgrp_plat_cpu_to_node(CPU, lgrp_plat_cpu_node,
2057 lgrp_plat_cpu_node_nentries);
2058 ASSERT(curnode >= 0 && curnode < lgrp_plat_node_cnt);
2059 if (lgrp_plat_lat_stats.latencies[curnode][curnode] == 0)
2060 lgrp_plat_probe();
2061
2062 return;
2063 }
2064
2065 /*
2066 * When probing memory, use one page for each of the multiple samples
2067 * taken to determine the lgroup topology
2068 */
2069 if (lgrp_plat_probe_mem_config.probe_memsize == 0)
2070 lgrp_plat_probe_mem_config.probe_memsize = PAGESIZE *
2071 lgrp_plat_probe_nsamples;
2072
2073 /*
2074 * Map memory in each node needed for probing to determine latency
2075 * topology
2076 */
2077 for (i = 0; i < lgrp_plat_node_cnt; i++) {
2078 int mnode;
2079
2080 /*
2081 * Skip this node and leave its probe page NULL
2082 * if it doesn't have any memory
2083 */
2084 mnode = i;
2085 if (!mem_node_config[mnode].exists) {
2086 lgrp_plat_probe_mem_config.probe_va[i] = NULL;
2087 continue;
2088 }
2089
2090 /*
2091 * Allocate kernel virtual memory for probing (one page per sample)
2092 */
2093 lgrp_plat_probe_mem_config.probe_va[i] = vmem_alloc(heap_arena,
2094 lgrp_plat_probe_mem_config.probe_memsize, VM_NOSLEEP);
2095 if (lgrp_plat_probe_mem_config.probe_va[i] == NULL) {
2096 cmn_err(CE_WARN,
2097 "lgrp_plat_main_init: couldn't allocate memory");
2098 return;
2099 }
2100
2101 /*
2102 * Get PFN for first page in each node
2103 */
2104 lgrp_plat_probe_mem_config.probe_pfn[i] =
2105 mem_node_config[mnode].physbase;
2106
2107 /*
2108 * Map virtual page to first page in node
2109 */
2110 hat_devload(kas.a_hat, lgrp_plat_probe_mem_config.probe_va[i],
2111 lgrp_plat_probe_mem_config.probe_memsize,
2112 lgrp_plat_probe_mem_config.probe_pfn[i],
2113 PROT_READ | PROT_WRITE | HAT_PLAT_NOCACHE,
2114 HAT_LOAD_NOCONSIST);
2115 }
2116
2117 /*
2118 * Probe from current CPU
2119 */
2120 lgrp_plat_probe();
2121 }
2122
2123
2124 /*
2125 * Return the number of free, allocatable, or installed
2126 * pages in an lgroup.
2127 * This is a copy of the MAX_MEM_NODES == 1 version of the routine
2128 * used when MPO is disabled (i.e. single lgroup) or this is the root lgroup.
2129 */
2130 static pgcnt_t
2131 lgrp_plat_mem_size_default(lgrp_handle_t lgrphand, lgrp_mem_query_t query)
2132 {
2133 _NOTE(ARGUNUSED(lgrphand));
2134
2135 struct memlist *mlist;
2136 pgcnt_t npgs = 0;
2137 extern struct memlist *phys_avail;
2138 extern struct memlist *phys_install;
2139
2140 switch (query) {
2141 case LGRP_MEM_SIZE_FREE:
2142 return ((pgcnt_t)freemem);
2143 case LGRP_MEM_SIZE_AVAIL:
2144 memlist_read_lock();
2145 for (mlist = phys_avail; mlist; mlist = mlist->ml_next)
2146 npgs += btop(mlist->ml_size);
2147 memlist_read_unlock();
2148 return (npgs);
2149 case LGRP_MEM_SIZE_INSTALL:
2150 memlist_read_lock();
2151 for (mlist = phys_install; mlist; mlist = mlist->ml_next)
2152 npgs += btop(mlist->ml_size);
2153 memlist_read_unlock();
2154 return (npgs);
2155 default:
2156 return ((pgcnt_t)0);
2157 }
2158 }
2159
2160
2161 /*
2162 * Update node to proximity domain mappings for given domain and return node ID
2163 */
2164 static int
2165 lgrp_plat_node_domain_update(node_domain_map_t *node_domain, int node_cnt,
2166 uint32_t domain)
2167 {
2168 uint_t node;
2169 uint_t start;
2170
2171 /*
2172 * Hash proximity domain ID into node to domain mapping table (array)
2173 * and add an entry for it at the first empty or matching slot found
2174 */
2175 node = start = NODE_DOMAIN_HASH(domain, node_cnt);
2176 do {
2177 /*
2178 * Entry doesn't exist yet, so create one for this proximity
2179 * domain and return node ID which is index into mapping table.
2180 */
2181 if (!node_domain[node].exists) {
2182 node_domain[node].prox_domain = domain;
2183 membar_producer();
2184 node_domain[node].exists = 1;
2185 return (node);
2186 }
2187
2188 /*
2189 * Entry exists for this proximity domain already, so just
2190 * return node ID (index into table).
2191 */
2192 if (node_domain[node].prox_domain == domain)
2193 return (node);
2194 node = NODE_DOMAIN_HASH(node + 1, node_cnt);
2195 } while (node != start);
2196
2197 /*
2198 * Ran out of supported entries, which shouldn't happen....
2199 */
2200 ASSERT(node != start);
2201 return (-1);
2202 }
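/*
 * Usage sketch (assuming NODE_DOMAIN_HASH(domain, cnt) reduces to something
 * like domain % cnt): with node_cnt == 4, proximity domains 0, 1, and 5
 * would claim slots 0, 1, and 2 respectively, since domain 5 hashes to
 * slot 1, finds it taken by domain 1, and probes forward to the next free
 * slot.  Repeated calls for the same domain keep returning the same node ID.
 */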
2203
2204 /*
2205 * Update node memory information for given proximity domain with specified
2206 * starting and ending physical address range (and return non-negative
2207 * values for success and negative ones for errors)
2208 */
2209 static int
2210 lgrp_plat_memnode_info_update(node_domain_map_t *node_domain, int node_cnt,
2211 memnode_phys_addr_map_t *memnode_info, int memnode_cnt, uint64_t start,
2212 uint64_t end, uint32_t domain, uint32_t device_id)
2213 {
2214 int node, mnode;
2215
2216 /*
2217 * Get node number for proximity domain
2218 */
2219 node = lgrp_plat_domain_to_node(node_domain, node_cnt, domain);
2220 if (node == -1) {
2221 node = lgrp_plat_node_domain_update(node_domain, node_cnt,
2222 domain);
2223 if (node == -1)
2224 return (-1);
2225 }
2226
2227 /*
2228 * This function is called during boot if device_id is
2229 * ACPI_MEMNODE_DEVID_BOOT, otherwise it's called at runtime for
2230 * memory DR operations.
2231 */
2232 if (device_id != ACPI_MEMNODE_DEVID_BOOT) {
2233 ASSERT(lgrp_plat_max_mem_node <= memnode_cnt);
2234
2235 for (mnode = lgrp_plat_node_cnt;
2236 mnode < lgrp_plat_max_mem_node; mnode++) {
2237 if (memnode_info[mnode].exists &&
2238 memnode_info[mnode].prox_domain == domain &&
2239 memnode_info[mnode].device_id == device_id) {
2240 if (btop(start) < memnode_info[mnode].start)
2241 memnode_info[mnode].start = btop(start);
2242 if (btop(end) > memnode_info[mnode].end)
2243 memnode_info[mnode].end = btop(end);
2244 return (1);
2245 }
2246 }
2247
2248 if (lgrp_plat_max_mem_node >= memnode_cnt) {
2249 return (-3);
2250 } else {
2251 lgrp_plat_max_mem_node++;
2252 memnode_info[mnode].start = btop(start);
2253 memnode_info[mnode].end = btop(end);
2254 memnode_info[mnode].prox_domain = domain;
2255 memnode_info[mnode].device_id = device_id;
2256 memnode_info[mnode].lgrphand = node;
2257 membar_producer();
2258 memnode_info[mnode].exists = 1;
2259 return (0);
2260 }
2261 }
2262
2263 /*
2264 * Create entry in table for node if it doesn't exist
2265 */
2266 ASSERT(node < memnode_cnt);
2267 if (!memnode_info[node].exists) {
2268 memnode_info[node].start = btop(start);
2269 memnode_info[node].end = btop(end);
2270 memnode_info[node].prox_domain = domain;
2271 memnode_info[node].device_id = device_id;
2272 memnode_info[node].lgrphand = node;
2273 membar_producer();
2274 memnode_info[node].exists = 1;
2275 return (0);
2276 }
2277
2278 /*
2279 * Entry already exists for this proximity domain
2280 *
2281 * There may be more than one SRAT memory entry for a domain, so we may
2282 * need to update existing start or end address for the node.
2283 */
2284 if (memnode_info[node].prox_domain == domain) {
2285 if (btop(start) < memnode_info[node].start)
2286 memnode_info[node].start = btop(start);
2287 if (btop(end) > memnode_info[node].end)
2288 memnode_info[node].end = btop(end);
2289 return (1);
2290 }
2291 return (-2);
2292 }
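/*
 * For example, if the SRAT has two enabled memory entries for the same
 * proximity domain covering [0, 2GB) and [4GB, 6GB), the first call above
 * creates the node entry with start = btop(0) and end = btop(2GB - 1), and
 * the second call returns 1 after extending end to btop(6GB - 1).  Note
 * that any hole between the two ranges ends up inside the node's span.
 */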
2293
2294
2295 /*
2296 * Have to sort nodes by starting physical address because plat_mnode_xcheck()
2297 * assumes and expects memnodes to be sorted in ascending order by physical
2298 * address.
2299 */
2300 static void
2301 lgrp_plat_node_sort(node_domain_map_t *node_domain, int node_cnt,
2302 cpu_node_map_t *cpu_node, int cpu_count,
2303 memnode_phys_addr_map_t *memnode_info)
2304 {
2305 boolean_t found;
2306 int i;
2307 int j;
2308 int n;
2309 boolean_t sorted;
2310 boolean_t swapped;
2311
2312 if (!lgrp_plat_node_sort_enable || node_cnt <= 1 ||
2313 node_domain == NULL || memnode_info == NULL)
2314 return;
2315
2316 /*
2317 * Sorted already?
2318 */
2319 sorted = B_TRUE;
2320 for (i = 0; i < node_cnt - 1; i++) {
2321 /*
2322 * Skip entries that don't exist
2323 */
2324 if (!memnode_info[i].exists)
2325 continue;
2326
2327 /*
2328 * Try to find next existing entry to compare against
2329 */
2330 found = B_FALSE;
2331 for (j = i + 1; j < node_cnt; j++) {
2332 if (memnode_info[j].exists) {
2333 found = B_TRUE;
2334 break;
2335 }
2336 }
2337
2338 /*
2339 * Done if no more existing entries to compare against
2340 */
2341 if (found == B_FALSE)
2342 break;
2343
2344 /*
2345 * Not sorted if starting address of current entry is bigger
2346 * than starting address of next existing entry
2347 */
2348 if (memnode_info[i].start > memnode_info[j].start) {
2349 sorted = B_FALSE;
2350 break;
2351 }
2352 }
2353
2354 /*
2355 * Don't need to sort if sorted already
2356 */
2357 if (sorted == B_TRUE)
2358 return;
2359
2360 /*
2361 * Just use bubble sort since number of nodes is small
2362 */
2363 n = node_cnt;
2364 do {
2365 swapped = B_FALSE;
2366 n--;
2367 for (i = 0; i < n; i++) {
2368 /*
2369 * Skip entries that don't exist
2370 */
2371 if (!memnode_info[i].exists)
2372 continue;
2373
2374 /*
2375 * Try to find next existing entry to compare against
2376 */
2377 found = B_FALSE;
2378 for (j = i + 1; j <= n; j++) {
2379 if (memnode_info[j].exists) {
2380 found = B_TRUE;
2381 break;
2382 }
2383 }
2384
2385 /*
2386 * Done if no more existing entries to compare against
2387 */
2388 if (found == B_FALSE)
2389 break;
2390
2391 if (memnode_info[i].start > memnode_info[j].start) {
2392 memnode_phys_addr_map_t save_addr;
2393 node_domain_map_t save_node;
2394
2395 /*
2396 * Swap node to proximity domain ID assignments
2397 */
2398 bcopy(&node_domain[i], &save_node,
2399 sizeof (node_domain_map_t));
2400 bcopy(&node_domain[j], &node_domain[i],
2401 sizeof (node_domain_map_t));
2402 bcopy(&save_node, &node_domain[j],
2403 sizeof (node_domain_map_t));
2404
2405 /*
2406 * Swap node to physical memory assignments
2407 */
2408 bcopy(&memnode_info[i], &save_addr,
2409 sizeof (memnode_phys_addr_map_t));
2410 bcopy(&memnode_info[j], &memnode_info[i],
2411 sizeof (memnode_phys_addr_map_t));
2412 bcopy(&save_addr, &memnode_info[j],
2413 sizeof (memnode_phys_addr_map_t));
2414 swapped = B_TRUE;
2415 }
2416 }
2417 } while (swapped == B_TRUE);
2418
2419 /*
2420 * Check to make sure that CPUs are assigned to the correct node IDs now
2421 * since node to proximity domain ID assignments may have been changed above
2422 */
2423 if (n == node_cnt - 1 || cpu_node == NULL || cpu_count < 1)
2424 return;
2425 for (i = 0; i < cpu_count; i++) {
2426 int node;
2427
2428 node = lgrp_plat_domain_to_node(node_domain, node_cnt,
2429 cpu_node[i].prox_domain);
2430 if (cpu_node[i].node != node)
2431 cpu_node[i].node = node;
2432 }
2433
2434 }
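/*
 * For example, if memnode_info[0] holds proximity domain 1 starting at 4GB
 * and memnode_info[1] holds proximity domain 0 starting at 0, the bubble
 * sort above swaps both the node_domain[] and memnode_info[] entries so
 * that node 0 spans the lower addresses, and the final pass rewrites
 * cpu_node[].node so CPUs in proximity domain 0 refer to node 0 again.
 */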
2435
2436
2437 /*
2438 * Return time needed to probe from current CPU to memory in given node
2439 */
2440 static hrtime_t
2441 lgrp_plat_probe_time(int to, cpu_node_map_t *cpu_node, int cpu_node_nentries,
2442 lgrp_plat_probe_mem_config_t *probe_mem_config,
2443 lgrp_plat_latency_stats_t *lat_stats, lgrp_plat_probe_stats_t *probe_stats)
2444 {
2445 caddr_t buf;
2446 hrtime_t elapsed;
2447 hrtime_t end;
2448 int from;
2449 int i;
2450 int ipl;
2451 hrtime_t max;
2452 hrtime_t min;
2453 hrtime_t start;
2454 extern int use_sse_pagecopy;
2455
2456 /*
2457 * Determine ID of node containing current CPU
2458 */
2459 from = lgrp_plat_cpu_to_node(CPU, cpu_node, cpu_node_nentries);
2460 ASSERT(from >= 0 && from < lgrp_plat_node_cnt);
2461
2462 /*
2463 * Do common work for probing main memory
2464 */
2465 if (lgrp_plat_probe_flags & LGRP_PLAT_PROBE_PGCPY) {
2466 /*
2467 * Skip probing any nodes without memory and
2468 * set probe time to 0
2469 */
2470 if (probe_mem_config->probe_va[to] == NULL) {
2471 lat_stats->latencies[from][to] = 0;
2472 return (0);
2473 }
2474
2475 /*
2476 * Invalidate caches once instead of once every sample,
2477 * which should cut the cost of probing by a lot
2478 */
2479 probe_stats->flush_cost = gethrtime();
2480 invalidate_cache();
2481 probe_stats->flush_cost = gethrtime() -
2482 probe_stats->flush_cost;
2483 probe_stats->probe_cost_total += probe_stats->flush_cost;
2484 }
2485
2486 /*
2487 * Probe from current CPU to given memory using specified operation
2488 * and take specified number of samples
2489 */
2490 max = 0;
2491 min = -1;
2492 for (i = 0; i < lgrp_plat_probe_nsamples; i++) {
2493 probe_stats->probe_cost = gethrtime();
2494
2495 /*
2496 * Can't measure probe time if gethrtime() isn't working yet
2497 */
2498 if (probe_stats->probe_cost == 0 && gethrtime() == 0)
2499 return (0);
2500
2501 if (lgrp_plat_probe_flags & LGRP_PLAT_PROBE_VENDOR) {
2502 /*
2503 * Measure how long it takes to read vendor ID from
2504 * Northbridge
2505 */
2506 elapsed = opt_probe_vendor(to, lgrp_plat_probe_nreads);
2507 } else {
2508 /*
2509 * Measure how long it takes to copy page
2510 * on top of itself
2511 */
2512 buf = probe_mem_config->probe_va[to] + (i * PAGESIZE);
2513
2514 kpreempt_disable();
2515 ipl = splhigh();
2516 start = gethrtime();
2517 if (use_sse_pagecopy)
2518 hwblkpagecopy(buf, buf);
2519 else
2520 bcopy(buf, buf, PAGESIZE);
2521 end = gethrtime();
2522 elapsed = end - start;
2523 splx(ipl);
2524 kpreempt_enable();
2525 }
2526
2527 probe_stats->probe_cost = gethrtime() -
2528 probe_stats->probe_cost;
2529 probe_stats->probe_cost_total += probe_stats->probe_cost;
2530
2531 if (min == -1 || elapsed < min)
2532 min = elapsed;
2533 if (elapsed > max)
2534 max = elapsed;
2535 }
2536
2537 /*
2538 * Update minimum and maximum probe times between
2539 * these two nodes
2540 */
2541 if (min < probe_stats->probe_min[from][to] ||
2542 probe_stats->probe_min[from][to] == 0)
2543 probe_stats->probe_min[from][to] = min;
2544
2545 if (max > probe_stats->probe_max[from][to])
2546 probe_stats->probe_max[from][to] = max;
2547
2548 return (min);
2549 }
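/*
 * Since the minimum over all samples is what gets returned, with
 * lgrp_plat_probe_nsamples == 3 and per-sample elapsed times of 120, 105,
 * and 110, the routine returns 105 and records 105 and 120 as the new
 * probe_min and probe_max for this (from, to) pair if they beat the
 * values seen so far.
 */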
2550
2551
2552 /*
2553 * Read boot property with CPU to APIC ID array, fill in CPU to node ID
2554 * mapping table with APIC ID for each CPU (if pointer to table isn't NULL),
2555 * and return number of CPU APIC IDs.
2556 *
2557 * NOTE: This code assumes that CPU IDs are assigned in the order that they
2558 * appear in the cpu_apicid_array boot property, which is based on and follows
2559 * the same ordering as the processor list in the ACPI MADT. If the code in
2560 * usr/src/uts/i86pc/io/pcplusmp/apic.c that reads MADT and assigns
2561 * CPU IDs ever changes, then this code will need to change too....
2562 */
2563 static int
2564 lgrp_plat_process_cpu_apicids(cpu_node_map_t *cpu_node)
2565 {
2566 int boot_prop_len;
2567 char *boot_prop_name = BP_CPU_APICID_ARRAY;
2568 uint32_t *cpu_apicid_array;
2569 int i;
2570 int n;
2571
2572 /*
2573 * Check length of property value
2574 */
2575 boot_prop_len = BOP_GETPROPLEN(bootops, boot_prop_name);
2576 if (boot_prop_len <= 0)
2577 return (-1);
2578
2579 /*
2580 * Calculate number of entries in array and return when the system is
2581 * not very interesting for NUMA. It's not interesting for NUMA if
2582 * system has only one CPU and doesn't support CPU hotplug.
2583 */
2584 n = boot_prop_len / sizeof (*cpu_apicid_array);
2585 if (n == 1 && !plat_dr_support_cpu())
2586 return (-2);
2587
2588 cpu_apicid_array = (uint32_t *)BOP_ALLOC(bootops, NULL, boot_prop_len,
2589 sizeof (*cpu_apicid_array));
2590 /*
2591 * Get CPU to APIC ID property value
2592 */
2593 if (cpu_apicid_array == NULL ||
2594 BOP_GETPROP(bootops, boot_prop_name, cpu_apicid_array) < 0)
2595 return (-3);
2596
2597 /*
2598 * Just return number of CPU APIC IDs if CPU to node mapping table is
2599 * NULL
2600 */
2601 if (cpu_node == NULL) {
2602 if (plat_dr_support_cpu() && n >= boot_ncpus) {
2603 return (boot_ncpus);
2604 } else {
2605 return (n);
2606 }
2607 }
2608
2609 /*
2610 * Fill in CPU to node ID mapping table with APIC ID for each CPU
2611 */
2612 for (i = 0; i < n; i++) {
2613 /* Only add boot CPUs into the map if CPU DR is enabled. */
2614 if (plat_dr_support_cpu() && i >= boot_ncpus)
2615 break;
2616 cpu_node[i].exists = 1;
2617 cpu_node[i].apicid = cpu_apicid_array[i];
2618 cpu_node[i].prox_domain = UINT32_MAX;
2619 cpu_node[i].node = UINT_MAX;
2620 }
2621
2622 /*
2623 * Return number of CPUs based on number of APIC IDs
2624 */
2625 return (i);
2626 }
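/*
 * For example, a 4 CPU machine without CPU DR might supply a
 * cpu_apicid_array boot property of {0, 2, 4, 6}.  This routine would then
 * return 4 and, when given a mapping table, set cpu_node[1].apicid to 2
 * while leaving prox_domain and node at UINT32_MAX and UINT_MAX until the
 * SRAT is processed.
 */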
2627
2628
2629 /*
2630 * Read ACPI System Locality Information Table (SLIT) to determine how far each
2631 * NUMA node is from each other
2632 */
2633 static int
2634 lgrp_plat_process_slit(ACPI_TABLE_SLIT *tp,
2635 node_domain_map_t *node_domain, uint_t node_cnt,
2636 memnode_phys_addr_map_t *memnode_info, lgrp_plat_latency_stats_t *lat_stats)
2637 {
2638 int i;
2639 int j;
2640 int src;
2641 int dst;
2642 int localities;
2643 hrtime_t max;
2644 hrtime_t min;
2645 int retval;
2646 uint8_t *slit_entries;
2647
2648 if (tp == NULL || !lgrp_plat_slit_enable)
2649 return (1);
2650
2651 if (lat_stats == NULL)
2652 return (2);
2653
2654 localities = tp->LocalityCount;
2655
2656 min = lat_stats->latency_min;
2657 max = lat_stats->latency_max;
2658
2659 /*
2660 * Fill in latency matrix based on SLIT entries
2661 */
2662 slit_entries = tp->Entry;
2663 for (i = 0; i < localities; i++) {
2664 src = lgrp_plat_domain_to_node(node_domain,
2665 node_cnt, i);
2666 if (src == -1)
2667 continue;
2668
2669 for (j = 0; j < localities; j++) {
2670 uint8_t latency;
2671
2672 dst = lgrp_plat_domain_to_node(node_domain,
2673 node_cnt, j);
2674 if (dst == -1)
2675 continue;
2676
2677 latency = slit_entries[(i * localities) + j];
2678 lat_stats->latencies[src][dst] = latency;
2679 if (latency < min || min == -1)
2680 min = latency;
2681 if (latency > max)
2682 max = latency;
2683 }
2684 }
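/*
 * For illustration: the SLIT entries form a flattened localities x
 * localities byte matrix, so a two locality system normalized per the
 * ACPI spec (local distance 10) might carry {10, 21, 21, 10}, which
 * fills in latencies[0][0] == 10, latencies[0][1] == 21, and so on,
 * subject to the domain to node remapping above.
 */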
2685
2686 /*
2687 * Verify that latencies/distances given in SLIT look reasonable
2688 */
2689 retval = lgrp_plat_latency_verify(memnode_info, lat_stats);
2690
2691 if (retval) {
2692 /*
2693 * Reinitialize (zero) latency table since SLIT doesn't look
2694 * right
2695 */
2696 for (i = 0; i < localities; i++) {
2697 for (j = 0; j < localities; j++)
2698 lat_stats->latencies[i][j] = 0;
2699 }
2700 } else {
2701 /*
2702 * Update min and max latencies seen since SLIT looks valid
2703 */
2704 lat_stats->latency_min = min;
2705 lat_stats->latency_max = max;
2706 }
2707
2708 return (retval);
2709 }
2710
2711
2712 /*
2713 * Update lgrp latencies according to information returned by ACPI _SLI method.
2714 */
2715 static int
2716 lgrp_plat_process_sli(uint32_t domain_id, uchar_t *sli_info,
2717 uint32_t sli_cnt, node_domain_map_t *node_domain, uint_t node_cnt,
2718 lgrp_plat_latency_stats_t *lat_stats)
2719 {
2720 int i;
2721 int src, dst;
2722 uint8_t latency;
2723 hrtime_t max, min;
2724
2725 if (lat_stats == NULL || sli_info == NULL ||
2726 sli_cnt == 0 || domain_id >= sli_cnt)
2727 return (-1);
2728
2729 src = lgrp_plat_domain_to_node(node_domain, node_cnt, domain_id);
2730 if (src == -1) {
2731 src = lgrp_plat_node_domain_update(node_domain, node_cnt,
2732 domain_id);
2733 if (src == -1)
2734 return (-1);
2735 }
2736
2737 /*
2738 * Don't update latency info if topology has been flattened to 2 levels.
2739 */
2740 if (lgrp_plat_topo_flatten != 0) {
2741 return (0);
2742 }
2743
2744 /*
2745 * Latency information for proximity domain is ready.
2746 * TODO: support adjusting latency information at runtime.
2747 */
2748 if (lat_stats->latencies[src][src] != 0) {
2749 return (0);
2750 }
2751
2752 /* Validate latency information. */
2753 for (i = 0; i < sli_cnt; i++) {
2754 if (i == domain_id) {
2755 if (sli_info[i] != ACPI_SLIT_SELF_LATENCY ||
2756 sli_info[sli_cnt + i] != ACPI_SLIT_SELF_LATENCY) {
2757 return (-1);
2758 }
2759 } else {
2760 if (sli_info[i] <= ACPI_SLIT_SELF_LATENCY ||
2761 sli_info[sli_cnt + i] <= ACPI_SLIT_SELF_LATENCY ||
2762 sli_info[i] != sli_info[sli_cnt + i]) {
2763 return (-1);
2764 }
2765 }
2766 }
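/*
 * In other words, sli_info holds 2 * sli_cnt bytes: the first sli_cnt
 * bytes give latencies from this domain to every domain and the second
 * sli_cnt bytes give latencies from every domain back to this one.  The
 * checks above require both self entries to be exactly
 * ACPI_SLIT_SELF_LATENCY and each remote pair to match and exceed it.
 */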
2767
2768 min = lat_stats->latency_min;
2769 max = lat_stats->latency_max;
2770 for (i = 0; i < sli_cnt; i++) {
2771 dst = lgrp_plat_domain_to_node(node_domain, node_cnt, i);
2772 if (dst == -1)
2773 continue;
2774
2775 ASSERT(sli_info[i] == sli_info[sli_cnt + i]);
2776
2777 /* Update row in latencies matrix. */
2778 latency = sli_info[i];
2779 lat_stats->latencies[src][dst] = latency;
2780 if (latency < min || min == -1)
2781 min = latency;
2782 if (latency > max)
2783 max = latency;
2784
2785 /* Update column in latencies matrix. */
2786 latency = sli_info[sli_cnt + i];
2787 lat_stats->latencies[dst][src] = latency;
2788 if (latency < min || min == -1)
2789 min = latency;
2790 if (latency > max)
2791 max = latency;
2792 }
2793 lat_stats->latency_min = min;
2794 lat_stats->latency_max = max;
2795
2796 return (0);
2797 }
2798
2799
2800 /*
2801 * Read ACPI System Resource Affinity Table (SRAT) to determine which CPUs
2802 * and memory are local to each other in the same NUMA node and return number
2803 * of nodes
2804 */
2805 static int
2806 lgrp_plat_process_srat(ACPI_TABLE_SRAT *tp, ACPI_TABLE_MSCT *mp,
2807 uint32_t *prox_domain_min, node_domain_map_t *node_domain,
2808 cpu_node_map_t *cpu_node, int cpu_count,
2809 memnode_phys_addr_map_t *memnode_info)
2810 {
2811 ACPI_SUBTABLE_HEADER *item, *srat_end;
2812 int i;
2813 int node_cnt;
2814 int proc_entry_count;
2815 int rc;
2816
2817 /*
2818 * Nothing to do when no SRAT or disabled
2819 */
2820 if (tp == NULL || !lgrp_plat_srat_enable)
2821 return (-1);
2822
2823 /*
2824 * Try to get domain information from MSCT table.
2825 * ACPI4.0: OSPM will use information provided by the MSCT only
2826 * when the System Resource Affinity Table (SRAT) exists.
2827 */
2828 node_cnt = lgrp_plat_msct_domains(mp, prox_domain_min);
2829 if (node_cnt <= 0) {
2830 /*
2831 * Determine number of nodes by counting number of proximity
2832 * domains in SRAT.
2833 */
2834 node_cnt = lgrp_plat_srat_domains(tp, prox_domain_min);
2835 }
2836 /*
2837 * Return if the number of nodes is 1 or less since we don't need to read the SRAT.
2838 */
2839 if (node_cnt == 1)
2840 return (1);
2841 else if (node_cnt <= 0)
2842 return (-2);
2843
2844 /*
2845 * Walk through SRAT, examining each CPU and memory entry to determine
2846 * which CPUs and memory belong to which node.
2847 */
2848 item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)tp + sizeof (*tp));
2849 srat_end = (ACPI_SUBTABLE_HEADER *)(tp->Header.Length + (uintptr_t)tp);
2850 proc_entry_count = 0;
2851 while (item < srat_end) {
2852 uint32_t apic_id;
2853 uint32_t domain;
2854 uint64_t end;
2855 uint64_t length;
2856 uint64_t start;
2857
2858 switch (item->Type) {
2859 case ACPI_SRAT_TYPE_CPU_AFFINITY: { /* CPU entry */
2860 ACPI_SRAT_CPU_AFFINITY *cpu =
2861 (ACPI_SRAT_CPU_AFFINITY *) item;
2862
2863 if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED) ||
2864 cpu_node == NULL)
2865 break;
2866
2867 /*
2868 * Calculate domain (node) ID and fill in APIC ID to
2869 * domain/node mapping table
2870 */
2871 domain = cpu->ProximityDomainLo;
2872 for (i = 0; i < 3; i++) {
2873 domain += cpu->ProximityDomainHi[i] <<
2874 ((i + 1) * 8);
2875 }
2876 apic_id = cpu->ApicId;
2877
2878 rc = lgrp_plat_cpu_node_update(node_domain, node_cnt,
2879 cpu_node, cpu_count, apic_id, domain);
2880 if (rc < 0)
2881 return (-3);
2882 else if (rc == 0)
2883 proc_entry_count++;
2884 break;
2885 }
2886 case ACPI_SRAT_TYPE_MEMORY_AFFINITY: { /* memory entry */
2887 ACPI_SRAT_MEM_AFFINITY *mem =
2888 (ACPI_SRAT_MEM_AFFINITY *)item;
2889
2890 if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED) ||
2891 memnode_info == NULL)
2892 break;
2893
2894 /*
2895 * Get domain (node) ID and fill in domain/node
2896 * to memory mapping table
2897 */
2898 domain = mem->ProximityDomain;
2899 start = mem->BaseAddress;
2900 length = mem->Length;
2901 end = start + length - 1;
2902
2903 /*
2904 * According to ACPI 4.0, both ENABLE and HOTPLUG flags
2905 * may be set for memory address range entries in SRAT
2906 * table which are reserved for memory hot plug.
2907 * We intersect memory address ranges in SRAT table
2908 * with memory ranges in physinstalled to filter out
2909 * memory address ranges reserved for hot plug.
2910 */
2911 if (mem->Flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
2912 uint64_t rstart = UINT64_MAX;
2913 uint64_t rend = 0;
2914 struct memlist *ml;
2915 extern struct bootops *bootops;
2916
2917 memlist_read_lock();
2918 for (ml = bootops->boot_mem->physinstalled;
2919 ml; ml = ml->ml_next) {
2920 uint64_t tstart = ml->ml_address;
2921 uint64_t tend;
2922
2923 tend = ml->ml_address + ml->ml_size;
2924 if (tstart > end || tend < start)
2925 continue;
2926 if (start > tstart)
2927 tstart = start;
2928 if (rstart > tstart)
2929 rstart = tstart;
2930 if (end < tend)
2931 tend = end;
2932 if (rend < tend)
2933 rend = tend;
2934 }
2935 memlist_read_unlock();
2936 start = rstart;
2937 end = rend;
2938 /* Skip this entry if no memory installed. */
2939 if (start > end)
2940 break;
2941 }
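/*
 * For example, a hot pluggable SRAT range of [4GB, 8GB) on a
 * machine with only [0, 6GB) physically installed gets clipped
 * to [4GB, 6GB) here, while a range with no installed overlap
 * leaves rstart > rend and the entry is skipped.
 */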
2942
2943 if (lgrp_plat_memnode_info_update(node_domain,
2944 node_cnt, memnode_info, node_cnt,
2945 start, end, domain, ACPI_MEMNODE_DEVID_BOOT) < 0)
2946 return (-4);
2947 break;
2948 }
2949 case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: { /* x2apic CPU */
2950 ACPI_SRAT_X2APIC_CPU_AFFINITY *x2cpu =
2951 (ACPI_SRAT_X2APIC_CPU_AFFINITY *) item;
2952
2953 if (!(x2cpu->Flags & ACPI_SRAT_CPU_ENABLED) ||
2954 cpu_node == NULL)
2955 break;
2956
2957 /*
2958 * Calculate domain (node) ID and fill in APIC ID to
2959 * domain/node mapping table
2960 */
2961 domain = x2cpu->ProximityDomain;
2962 apic_id = x2cpu->ApicId;
2963
2964 rc = lgrp_plat_cpu_node_update(node_domain, node_cnt,
2965 cpu_node, cpu_count, apic_id, domain);
2966 if (rc < 0)
2967 return (-3);
2968 else if (rc == 0)
2969 proc_entry_count++;
2970 break;
2971 }
2972 default:
2973 break;
2974 }
2975
2976 item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item + item->Length);
2977 }
2978
2979 /*
2980 * Should have seen at least as many SRAT processor entries as CPUs
2981 */
2982 if (proc_entry_count < cpu_count)
2983 return (-5);
2984
2985 /*
2986 * Need to sort nodes by starting physical address since VM system
2987 * assumes and expects memnodes to be sorted in ascending order by
2988 * physical address
2989 */
2990 lgrp_plat_node_sort(node_domain, node_cnt, cpu_node, cpu_count,
2991 memnode_info);
2992
2993 return (node_cnt);
2994 }
2995
2996
2997 /*
2998 * Allocate permanent memory for any temporary memory that we needed to
2999 * allocate using BOP_ALLOC() before kmem_alloc() and the VM system were
3000 * initialized, and copy everything from temporary to permanent memory,
3001 * since temporary boot memory will eventually be released during boot
3002 */
3003 static void
3004 lgrp_plat_release_bootstrap(void)
3005 {
3006 void *buf;
3007 size_t size;
3008
3009 if (lgrp_plat_cpu_node_nentries > 0) {
3010 size = lgrp_plat_cpu_node_nentries * sizeof (cpu_node_map_t);
3011 buf = kmem_alloc(size, KM_SLEEP);
3012 bcopy(lgrp_plat_cpu_node, buf, size);
3013 lgrp_plat_cpu_node = buf;
3014 }
3015 }
3016
3017
3018 /*
3019 * Return number of proximity domains given in ACPI SRAT
3020 */
3021 static int
3022 lgrp_plat_srat_domains(ACPI_TABLE_SRAT *tp, uint32_t *prox_domain_min)
3023 {
3024 int domain_cnt;
3025 uint32_t domain_min;
3026 ACPI_SUBTABLE_HEADER *item, *end;
3027 int i;
3028 node_domain_map_t node_domain[MAX_NODES];
3029
3030
3031 if (tp == NULL || !lgrp_plat_srat_enable)
3032 return (1);
3033
3034 /*
3035 * Walk through SRAT to find minimum proximity domain ID
3036 */
3037 domain_min = UINT32_MAX;
3038 item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)tp + sizeof (*tp));
3039 end = (ACPI_SUBTABLE_HEADER *)(tp->Header.Length + (uintptr_t)tp);
3040 while (item < end) {
3041 uint32_t domain;
3042
3043 switch (item->Type) {
3044 case ACPI_SRAT_TYPE_CPU_AFFINITY: { /* CPU entry */
3045 ACPI_SRAT_CPU_AFFINITY *cpu =
3046 (ACPI_SRAT_CPU_AFFINITY *) item;
3047
3048 if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED)) {
3049 item = (ACPI_SUBTABLE_HEADER *)
3050 ((uintptr_t)item + item->Length);
3051 continue;
3052 }
3053 domain = cpu->ProximityDomainLo;
3054 for (i = 0; i < 3; i++) {
3055 domain += cpu->ProximityDomainHi[i] <<
3056 ((i + 1) * 8);
3057 }
3058 break;
3059 }
3060 case ACPI_SRAT_TYPE_MEMORY_AFFINITY: { /* memory entry */
3061 ACPI_SRAT_MEM_AFFINITY *mem =
3062 (ACPI_SRAT_MEM_AFFINITY *)item;
3063
3064 if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED)) {
3065 item = (ACPI_SUBTABLE_HEADER *)
3066 ((uintptr_t)item + item->Length);
3067 continue;
3068 }
3069 domain = mem->ProximityDomain;
3070 break;
3071 }
3072 case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: { /* x2apic CPU */
3073 ACPI_SRAT_X2APIC_CPU_AFFINITY *x2cpu =
3074 (ACPI_SRAT_X2APIC_CPU_AFFINITY *) item;
3075
3076 if (!(x2cpu->Flags & ACPI_SRAT_CPU_ENABLED)) {
3077 item = (ACPI_SUBTABLE_HEADER *)
3078 ((uintptr_t)item + item->Length);
3079 continue;
3080 }
3081 domain = x2cpu->ProximityDomain;
3082 break;
3083 }
3084 default:
3085 item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item +
3086 item->Length);
3087 continue;
3088 }
3089
3090 /*
3091 * Keep track of minimum proximity domain ID
3092 */
3093 if (domain < domain_min)
3094 domain_min = domain;
3095
3096 item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item + item->Length);
3097 }
3098 if (lgrp_plat_domain_min_enable && prox_domain_min != NULL)
3099 *prox_domain_min = domain_min;
3100
3101 /*
3102 * Walk through SRAT, examining each CPU and memory entry to determine
3103 * proximity domain ID for each.
3104 */
3105 domain_cnt = 0;
3106 item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)tp + sizeof (*tp));
3107 end = (ACPI_SUBTABLE_HEADER *)(tp->Header.Length + (uintptr_t)tp);
3108 bzero(node_domain, MAX_NODES * sizeof (node_domain_map_t));
3109 while (item < end) {
3110 uint32_t domain;
3111 boolean_t overflow;
3112 uint_t start;
3113
3114 switch (item->Type) {
3115 case ACPI_SRAT_TYPE_CPU_AFFINITY: { /* CPU entry */
3116 ACPI_SRAT_CPU_AFFINITY *cpu =
3117 (ACPI_SRAT_CPU_AFFINITY *) item;
3118
3119 if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED)) {
3120 item = (ACPI_SUBTABLE_HEADER *)
3121 ((uintptr_t)item + item->Length);
3122 continue;
3123 }
3124 domain = cpu->ProximityDomainLo;
3125 for (i = 0; i < 3; i++) {
3126 domain += cpu->ProximityDomainHi[i] <<
3127 ((i + 1) * 8);
3128 }
3129 break;
3130 }
3131 case ACPI_SRAT_TYPE_MEMORY_AFFINITY: { /* memory entry */
3132 ACPI_SRAT_MEM_AFFINITY *mem =
3133 (ACPI_SRAT_MEM_AFFINITY *)item;
3134
3135 if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED)) {
3136 item = (ACPI_SUBTABLE_HEADER *)
3137 ((uintptr_t)item + item->Length);
3138 continue;
3139 }
3140 domain = mem->ProximityDomain;
3141 break;
3142 }
3143 case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: { /* x2apic CPU */
3144 ACPI_SRAT_X2APIC_CPU_AFFINITY *x2cpu =
3145 (ACPI_SRAT_X2APIC_CPU_AFFINITY *) item;
3146
3147 if (!(x2cpu->Flags & ACPI_SRAT_CPU_ENABLED)) {
3148 item = (ACPI_SUBTABLE_HEADER *)
3149 ((uintptr_t)item + item->Length);
3150 continue;
3151 }
3152 domain = x2cpu->ProximityDomain;
3153 break;
3154 }
3155 default:
3156 item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item +
3157 item->Length);
3158 continue;
3159 }
3160
3161 /*
3162 * Count and keep track of which proximity domain IDs have been seen
3163 */
3164 start = i = domain % MAX_NODES;
3165 overflow = B_TRUE;
3166 do {
3167 /*
3168 * Create entry for proximity domain and increment
3169 * count when no entry exists where proximity domain
3170 * hashed
3171 */
3172 if (!node_domain[i].exists) {
3173 node_domain[i].exists = 1;
3174 node_domain[i].prox_domain = domain;
3175 domain_cnt++;
3176 overflow = B_FALSE;
3177 break;
3178 }
3179
3180 /*
3181 * Nothing to do when the proximity domain has been seen
3182 * already and its entry exists
3183 */
3184 if (node_domain[i].prox_domain == domain) {
3185 overflow = B_FALSE;
3186 break;
3187 }
3188
3189 /*
3190 * Entry exists where proximity domain hashed, but for a
3191 * different proximity domain, so keep searching for an
3192 * empty slot to put it in or a matching entry, whichever
3193 * comes first.
3194 */
3195 i = (i + 1) % MAX_NODES;
3196 } while (i != start);
3197
3198 /*
3199 * Didn't find an empty or matching entry, which means we have
3200 * more proximity domains than supported nodes (:-(
3201 */
3202 ASSERT(overflow != B_TRUE);
3203 if (overflow == B_TRUE)
3204 return (-1);
3205
3206 item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item + item->Length);
3207 }
3208 return (domain_cnt);
3209 }
3210
3211
3212 /*
3213 * Parse domain information in ACPI Maximum System Capability Table (MSCT).
3214 * MSCT table has been verified in function process_msct() in fakebop.c.
3215 */
3216 static int
3217 lgrp_plat_msct_domains(ACPI_TABLE_MSCT *tp, uint32_t *prox_domain_min)
3218 {
3219 int last_seen = 0;
3220 uint32_t proxmin = UINT32_MAX;
3221 ACPI_MSCT_PROXIMITY *item, *end;
3222
3223 if (tp == NULL || lgrp_plat_msct_enable == 0)
3224 return (-1);
3225
3226 if (tp->MaxProximityDomains >= MAX_NODES) {
3227 cmn_err(CE_CONT,
3228 "?lgrp: too many proximity domains (%d), max %d supported, "
3229 "disable support of CPU/memory DR operations.",
3230 tp->MaxProximityDomains + 1, MAX_NODES);
3231 plat_dr_disable_cpu();
3232 plat_dr_disable_memory();
3233 return (-1);
3234 }
3235
3236 if (prox_domain_min != NULL) {
3237 end = (void *)(tp->Header.Length + (uintptr_t)tp);
3238 for (item = (void *)((uintptr_t)tp +
3239 tp->ProximityOffset); item < end;
3240 item = (void *)(item->Length + (uintptr_t)item)) {
3241 if (item->RangeStart < proxmin) {
3242 proxmin = item->RangeStart;
3243 }
3244
3245 last_seen = item->RangeEnd - item->RangeStart + 1;
3246 /*
3247 * Break out if all proximity domains have been
3248 * processed. Some BIOSes may have unused items
3249 * at the end of MSCT table.
3250 */
3251 if (last_seen > tp->MaxProximityDomains) {
3252 break;
3253 }
3254 }
3255 *prox_domain_min = proxmin;
3256 }
3257
3258 return (tp->MaxProximityDomains + 1);
3259 }
3260
3261
3262 /*
3263 * Set lgroup latencies for 2 level lgroup topology
3264 */
3265 static void
3266 lgrp_plat_2level_setup(lgrp_plat_latency_stats_t *lat_stats)
3267 {
3268 int i, j;
3269
3270 ASSERT(lat_stats != NULL);
3271
3272 if (lgrp_plat_node_cnt >= 4)
3273 cmn_err(CE_NOTE,
3274 "MPO only optimizing for local and remote\n");
3275 for (i = 0; i < lgrp_plat_node_cnt; i++) {
3276 for (j = 0; j < lgrp_plat_node_cnt; j++) {
3277 if (i == j)
3278 lat_stats->latencies[i][j] = 2;
3279 else
3280 lat_stats->latencies[i][j] = 3;
3281 }
3282 }
3283 lat_stats->latency_min = 2;
3284 lat_stats->latency_max = 3;
3285 /* TODO: check it. */
3286 lgrp_config(LGRP_CONFIG_FLATTEN, 2, 0);
3287 lgrp_plat_topo_flatten = 1;
3288 }
3289
3290
3291 /*
3292 * The following Opteron specific constants, macros, types, and routines define
3293 * PCI configuration space registers and how to read them to determine the NUMA
3294 * configuration of *supported* Opteron processors. They provide the same
3295 * information that may be gotten from the ACPI System Resource Affinity Table
3296 * (SRAT) if it exists on the machine of interest.
3297 *
3298 * The AMD BIOS and Kernel Developer's Guide (BKDG) for the processor family
3299 * of interest describes all of these registers and their contents. The main
3300 * registers used by this code to determine the NUMA configuration of the
3301 * machine are the node ID register for the number of NUMA nodes and the DRAM
3302 * address map registers for the physical address range of each node.
3303 *
3304 * NOTE: The format and how to determine the NUMA configuration using PCI
3305 * config space registers may change or may not be supported in future
3306 * Opteron processor families.
3307 */
3308
3309 /*
3310 * How many bits to shift Opteron DRAM Address Map base and limit registers
3311 * to get actual value
3312 */
3313 #define OPT_DRAMADDR_HI_LSHIFT_ADDR 40 /* shift left for address */
3314 #define OPT_DRAMADDR_LO_LSHIFT_ADDR 8 /* shift left for address */
3315
3316 #define OPT_DRAMADDR_HI_MASK_ADDR 0x000000FF /* address bits 47-40 */
3317 #define OPT_DRAMADDR_LO_MASK_ADDR 0xFFFF0000 /* address bits 39-24 */
3318
3319 #define OPT_DRAMADDR_LO_MASK_OFF 0xFFFFFF /* offset for address */
3320
3321 /*
3322 * Macros to derive addresses from Opteron DRAM Address Map registers
3323 */
3324 #define OPT_DRAMADDR_HI(reg) \
3325 (((u_longlong_t)reg & OPT_DRAMADDR_HI_MASK_ADDR) << \
3326 OPT_DRAMADDR_HI_LSHIFT_ADDR)
3327
3328 #define OPT_DRAMADDR_LO(reg) \
3329 (((u_longlong_t)reg & OPT_DRAMADDR_LO_MASK_ADDR) << \
3330 OPT_DRAMADDR_LO_LSHIFT_ADDR)
3331
3332 #define OPT_DRAMADDR(high, low) \
3333 (OPT_DRAMADDR_HI(high) | OPT_DRAMADDR_LO(low))
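/*
 * For example, with base_hi == 0x1 and base_lo == 0x00400000:
 * OPT_DRAMADDR_HI(0x1) == 1ULL << 40 (1TB), OPT_DRAMADDR_LO(0x00400000) ==
 * 0x00400000 << 8 == 0x40000000 (1GB), so OPT_DRAMADDR(0x1, 0x00400000)
 * yields physical address 0x10040000000.
 */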
3334
3335 /*
3336 * Bit masks defining what's in Opteron DRAM Address Map base register
3337 */
3338 #define OPT_DRAMBASE_LO_MASK_RE 0x1 /* read enable */
3339 #define OPT_DRAMBASE_LO_MASK_WE 0x2 /* write enable */
3340 #define OPT_DRAMBASE_LO_MASK_INTRLVEN 0x700 /* interleave */
3341
3342 /*
3343 * Bit masks defining what's in Opteron DRAM Address Map limit register
3344 */
3345 #define OPT_DRAMLIMIT_LO_MASK_DSTNODE 0x7 /* destination node */
3346 #define OPT_DRAMLIMIT_LO_MASK_INTRLVSEL 0x700 /* interleave select */
3347
3348
3349 /*
3350 * Opteron Node ID register in PCI configuration space contains
3351 * number of nodes in system, etc. for Opteron K8. The following
3352 * constants and macros define its contents, structure, and access.
3353 */
3354
3355 /*
3356 * Bit masks defining what's in Opteron Node ID register
3357 */
3358 #define OPT_NODE_MASK_ID 0x7 /* node ID */
3359 #define OPT_NODE_MASK_CNT 0x70 /* node count */
3360 #define OPT_NODE_MASK_IONODE 0x700 /* Hypertransport I/O hub node ID */
3361 #define OPT_NODE_MASK_LCKNODE 0x7000 /* lock controller node ID */
3362 #define OPT_NODE_MASK_CPUCNT 0xF0000 /* CPUs in system (0 means 1 CPU) */
3363
3364 /*
3365 * How many bits in Opteron Node ID register to shift right to get actual value
3366 */
3367 #define OPT_NODE_RSHIFT_CNT 0x4 /* shift right for node count value */
3368
3369 /*
3370 * Macros to get values from Opteron Node ID register
3371 */
3372 #define OPT_NODE_CNT(reg) \
3373 ((reg & OPT_NODE_MASK_CNT) >> OPT_NODE_RSHIFT_CNT)
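/*
 * For example, a node ID register value of 0x30 gives OPT_NODE_CNT(0x30) ==
 * (0x30 & 0x70) >> 4 == 3, which the caller turns into a node count of 4
 * since the field holds the number of nodes minus 1.
 */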
3374
3375 /*
3376 * Macro to setup PCI Extended Configuration Space (ECS) address to give to
3377 * "in/out" instructions
3378 *
3379 * NOTE: Should only be used in lgrp_plat_init() before MMIO setup because any
3380 * other uses should just do MMIO to access PCI ECS.
3381 * Must enable special bit in Northbridge Configuration Register on
3382 * Greyhound for extended CF8 space access to be able to access PCI ECS
3383 * using "in/out" instructions and restore special bit after done
3384 * accessing PCI ECS.
3385 */
3386 #define OPT_PCI_ECS_ADDR(bus, device, function, reg) \
3387 (PCI_CONE | (((bus) & 0xff) << 16) | (((device & 0x1f)) << 11) | \
3388 (((function) & 0x7) << 8) | ((reg) & 0xfc) | \
3389 ((((reg) >> 8) & 0xf) << 24))
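/*
 * For example, bus 0, device 24, function 1, register 0x140 encodes as
 * PCI_CONE | (24 << 11) | (1 << 8) | 0x40 | (0x1 << 24), i.e.
 * PCI_CONE | 0x0100c140, with bits 11:8 of the register offset placed in
 * address bits 27:24 for extended configuration space.
 */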
3390
3391 /*
3392 * PCI configuration space registers accessed by specifying
3393 * a bus, device, function, and offset. The following constants
3394 * define the values needed to access Opteron K8 configuration
3395 * info to determine its node topology
3396 */
3397
3398 #define OPT_PCS_BUS_CONFIG 0 /* Hypertransport config space bus */
3399
3400 /*
3401 * Opteron PCI configuration space register function values
3402 */
3403 #define OPT_PCS_FUNC_HT 0 /* Hypertransport configuration */
3404 #define OPT_PCS_FUNC_ADDRMAP 1 /* Address map configuration */
3405 #define OPT_PCS_FUNC_DRAM 2 /* DRAM configuration */
3406 #define OPT_PCS_FUNC_MISC 3 /* Miscellaneous configuration */
3407
3408 /*
3409 * PCI Configuration Space register offsets
3410 */
3411 #define OPT_PCS_OFF_VENDOR 0x0 /* device/vendor ID register */
3412 #define OPT_PCS_OFF_DRAMBASE_HI 0x140 /* DRAM Base register (node 0) */
3413 #define OPT_PCS_OFF_DRAMBASE_LO 0x40 /* DRAM Base register (node 0) */
3414 #define OPT_PCS_OFF_NODEID 0x60 /* Node ID register */
3415
3416 /*
3417 * Opteron PCI Configuration Space device IDs for nodes
3418 */
3419 #define OPT_PCS_DEV_NODE0 24 /* device number for node 0 */
3420
3421
3422 /*
3423 * Opteron DRAM address map gives base and limit for physical memory in a node
3424 */
3425 typedef struct opt_dram_addr_map {
3426 uint32_t base_hi;
3427 uint32_t base_lo;
3428 uint32_t limit_hi;
3429 uint32_t limit_lo;
3430 } opt_dram_addr_map_t;
3431
3432
3433 /*
3434 * Supported AMD processor families
3435 */
3436 #define AMD_FAMILY_HAMMER 15
3437 #define AMD_FAMILY_GREYHOUND 16
3438
3439 /*
3440 * Whether to have is_opteron() return 1 even when processor isn't supported
3441 */
3442 uint_t is_opteron_override = 0;
3443
3444 /*
3445 * AMD processor family for current CPU
3446 */
3447 uint_t opt_family = 0;
3448
3449
3450 /*
3451 * Determine whether we're running on a supported AMD Opteron, since the
3452 * node count and DRAM address map registers may have a different format or
3453 * may not be supported across processor families
3454 */
3455 static int
3456 is_opteron(void)
3457 {
3458
3459 if (x86_vendor != X86_VENDOR_AMD)
3460 return (0);
3461
3462 opt_family = cpuid_getfamily(CPU);
3463 if (opt_family == AMD_FAMILY_HAMMER ||
3464 opt_family == AMD_FAMILY_GREYHOUND || is_opteron_override)
3465 return (1);
3466 else
3467 return (0);
3468 }
3469
3470
3471 /*
3472 * Determine NUMA configuration for Opteron from registers that live in PCI
3473 * configuration space
3474 */
3475 static void
3476 opt_get_numa_config(uint_t *node_cnt, int *mem_intrlv,
3477 memnode_phys_addr_map_t *memnode_info)
3478 {
3479 uint_t bus;
3480 uint_t dev;
3481 struct opt_dram_addr_map dram_map[MAX_NODES];
3482 uint_t node;
3483 uint_t node_info[MAX_NODES];
3484 uint_t off_hi;
3485 uint_t off_lo;
3486 uint64_t nb_cfg_reg;
3487
3488 /*
3489 * Read configuration registers from PCI configuration space to
3490 * determine node information, which memory is in each node, etc.
3491 *
3492 * Write to PCI configuration space address register to specify
3493 * which configuration register to read and read/write PCI
3494 * configuration space data register to get/set contents
3495 */
3496 bus = OPT_PCS_BUS_CONFIG;
3497 dev = OPT_PCS_DEV_NODE0;
3498 off_hi = OPT_PCS_OFF_DRAMBASE_HI;
3499 off_lo = OPT_PCS_OFF_DRAMBASE_LO;
3500
3501 /*
3502 * Read node ID register for node 0 to get node count
3503 */
3504 node_info[0] = pci_getl_func(bus, dev, OPT_PCS_FUNC_HT,
3505 OPT_PCS_OFF_NODEID);
3506 *node_cnt = OPT_NODE_CNT(node_info[0]) + 1;
3507
3508 /*
3509 * If number of nodes is more than maximum supported, then set node
3510 * count to 1 and treat system as UMA instead of NUMA.
3511 */
3512 if (*node_cnt > MAX_NODES) {
3513 *node_cnt = 1;
3514 return;
3515 }
3516
3517 /*
3518 * For Greyhound, PCI Extended Configuration Space must be enabled to
3519 * read high DRAM address map base and limit registers
3520 */
3521 nb_cfg_reg = 0;
3522 if (opt_family == AMD_FAMILY_GREYHOUND) {
3523 nb_cfg_reg = rdmsr(MSR_AMD_NB_CFG);
3524 if ((nb_cfg_reg & AMD_GH_NB_CFG_EN_ECS) == 0)
3525 wrmsr(MSR_AMD_NB_CFG,
3526 nb_cfg_reg | AMD_GH_NB_CFG_EN_ECS);
3527 }
3528
3529 for (node = 0; node < *node_cnt; node++) {
3530 uint32_t base_hi;
3531 uint32_t base_lo;
3532 uint32_t limit_hi;
3533 uint32_t limit_lo;
3534
3535 /*
3536 * Read node ID register (except for node 0 which we just read)
3537 */
3538 if (node > 0) {
3539 node_info[node] = pci_getl_func(bus, dev,
3540 OPT_PCS_FUNC_HT, OPT_PCS_OFF_NODEID);
3541 }
3542
3543 /*
3544 * Read DRAM base and limit registers which specify
3545 * physical memory range of each node
3546 */
3547 if (opt_family != AMD_FAMILY_GREYHOUND)
3548 base_hi = 0;
3549 else {
3550 outl(PCI_CONFADD, OPT_PCI_ECS_ADDR(bus, dev,
3551 OPT_PCS_FUNC_ADDRMAP, off_hi));
3552 base_hi = dram_map[node].base_hi =
3553 inl(PCI_CONFDATA);
3554 }
3555 base_lo = dram_map[node].base_lo = pci_getl_func(bus, dev,
3556 OPT_PCS_FUNC_ADDRMAP, off_lo);
3557
3558 if ((dram_map[node].base_lo & OPT_DRAMBASE_LO_MASK_INTRLVEN) &&
3559 mem_intrlv)
3560 *mem_intrlv = *mem_intrlv + 1;
3561
3562 off_hi += 4; /* high limit register offset */
3563 if (opt_family != AMD_FAMILY_GREYHOUND)
3564 limit_hi = 0;
3565 else {
3566 outl(PCI_CONFADD, OPT_PCI_ECS_ADDR(bus, dev,
3567 OPT_PCS_FUNC_ADDRMAP, off_hi));
3568 limit_hi = dram_map[node].limit_hi =
3569 inl(PCI_CONFDATA);
3570 }
3571
3572 off_lo += 4; /* low limit register offset */
3573 limit_lo = dram_map[node].limit_lo = pci_getl_func(bus,
3574 dev, OPT_PCS_FUNC_ADDRMAP, off_lo);
3575
3576 /*
3577 * Increment device number to next node and register offsets
3578 * for DRAM base register of next node
3579 */
3580 off_hi += 4;
3581 off_lo += 4;
3582 dev++;
3583
3584 /*
3585 * Both read and write enable bits must be enabled in DRAM
3586 * address map base register for physical memory to exist in
3587 * node
3588 */
3589 if ((base_lo & OPT_DRAMBASE_LO_MASK_RE) == 0 ||
3590 (base_lo & OPT_DRAMBASE_LO_MASK_WE) == 0) {
3591 /*
3592 * Mark node memory as non-existent and set start and
3593 * end addresses to be same in memnode_info[]
3594 */
3595 memnode_info[node].exists = 0;
3596 memnode_info[node].start = memnode_info[node].end =
3597 (pfn_t)-1;
3598 continue;
3599 }
3600
3601 /*
3602 * Mark node memory as existing and remember physical address
3603 * range of each node for use later
3604 */
3605 memnode_info[node].exists = 1;
3606
3607 memnode_info[node].start = btop(OPT_DRAMADDR(base_hi, base_lo));
3608
3609 memnode_info[node].end = btop(OPT_DRAMADDR(limit_hi, limit_lo) |
3610 OPT_DRAMADDR_LO_MASK_OFF);
3611 }
3612
3613 /*
3614 * Restore PCI Extended Configuration Space enable bit
3615 */
3616 if (opt_family == AMD_FAMILY_GREYHOUND) {
3617 if ((nb_cfg_reg & AMD_GH_NB_CFG_EN_ECS) == 0)
3618 wrmsr(MSR_AMD_NB_CFG, nb_cfg_reg);
3619 }
3620 }
3621
3622
3623 /*
3624 * Return the average time per read of the vendor ID register on the
3625 * Northbridge of the specified destination node, over N reads from the current CPU
3626 */
3627 static hrtime_t
3628 opt_probe_vendor(int dest_node, int nreads)
3629 {
3630 int cnt;
3631 uint_t dev;
3632 /* LINTED: set but not used in function */
3633 volatile uint_t dev_vendor __unused;
3634 hrtime_t elapsed;
3635 hrtime_t end;
3636 int ipl;
3637 hrtime_t start;
3638
3639 dev = OPT_PCS_DEV_NODE0 + dest_node;
3640 kpreempt_disable();
3641 ipl = spl8();
3642 outl(PCI_CONFADD, PCI_CADDR1(0, dev, OPT_PCS_FUNC_DRAM,
3643 OPT_PCS_OFF_VENDOR));
3644 start = gethrtime();
3645 for (cnt = 0; cnt < nreads; cnt++)
3646 dev_vendor = inl(PCI_CONFDATA);
3647 end = gethrtime();
3648 elapsed = (end - start) / nreads;
3649 splx(ipl);
3650 kpreempt_enable();
3651 return (elapsed);
3652 }
3653