xref: /illumos-gate/usr/src/uts/intel/io/amdzen/amdzen.c (revision b8052df9f609edb713f6828c9eecc3d7be19dfb3)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2019, Joyent, Inc.
14  * Copyright 2022 Oxide Computer Company
15  */
16 
17 /*
18  * Nexus Driver for AMD Zen family systems. The purpose of this driver is to
19  * provide access to the following resources in a single, centralized fashion:
20  *
21  *  - The per-chip Data Fabric
22  *  - The North Bridge
23  *  - The System Management Network (SMN)
24  *
25  * This is a nexus driver as once we have attached to all the requisite
26  * components, we will enumerate child devices which consume this functionality.
27  *
28  * ------------------------
29  * Mapping Devices Together
30  * ------------------------
31  *
32  * The operating system needs to expose things like temperature sensors and DRAM
33  * configuration registers in terms that are meaningful to the system such as
34  * logical CPUs, cores, etc. This driver attaches to the PCI IDs that represent
35  * the northbridge and data fabric; however, there are multiple PCI devices (one
36  * per die) that exist. This driver does manage to map all of these three things
37  * together; however, it requires some acrobatics. Unfortunately, there's no
38  * direct way to map a northbridge to its corresponding die. However, we can map
39  * a CPU die to a data fabric PCI device and a data fabric PCI device to a
40  * corresponding northbridge PCI device.
41  *
42  * In current Zen based products, there is a direct mapping between processor
43  * nodes and a data fabric PCI device. All of the devices are on PCI Bus 0 and
44  * start from Device 0x18. Device 0x18 maps to processor node 0, 0x19 to
45  * processor node 1, etc. This means that to map a logical CPU to a data fabric
46  * device, we take its processor node id, add it to 0x18 and find the PCI device
47  * that is on bus 0, device 0x18. As each data fabric device is attached based
48  * on its PCI ID, we add it to the global list, amd_nbdf_dfs that is in the
49  * amd_f17nbdf_t structure.
50  *
51  * The northbridge PCI device has a defined device and function, but the PCI bus
52  * that it's on can vary. Each die has its own series of PCI buses that are
53  * assigned to it and the northbridge PCI device is on the first of die-specific
54  * PCI bus for each die. This also means that the northbridge will not show up
55  * on PCI bus 0, which is the PCI bus that all of the data fabric devices are
56  * on. While conventionally the northbridge with the lowest PCI bus value
57  * would correspond to processor node zero, hardware does not guarantee that at
58  * all. Because we don't want to be at the mercy of firmware, we don't rely on
59  * this ordering, even though we have yet to find a system that deviates from
60  * this scheme.
61  *
62  * One of the registers in the data fabric device's function 0
63  * (AMDZEN_DF_F0_CFG_ADDR_CTL) happens to have the first PCI bus that is
64  * associated with the processor node. This means that we can map a data fabric
65  * device to a northbridge by finding the northbridge whose PCI bus matches the
66  * value in the corresponding data fabric's AMDZEN_DF_F0_CFG_ADDR_CTL.
67  *
68  * We can map a northbridge to a data fabric device and a data fabric device to
69  * a die. Because these are generally 1:1 mappings, there is a transitive
70  * relationship and therefore we know which northbridge is associated with which
71  * processor die. This is summarized in the following image:
72  *
73  *  +-------+    +-----------------------------------+        +--------------+
74  *  | Die 0 |--->| Data Fabric PCI BDF 0/18/0        |------->| Northbridge  |
75  *  +-------+    | AMDZEN_DF_F0_CFG_ADDR_CTL: bus 10 |        | PCI  10/0/0  |
76  *     ...       +-----------------------------------+        +--------------+
77  *  +-------+     +------------------------------------+        +--------------+
78  *  | Die n |---->| Data Fabric PCI BDF 0/18+n/0       |------->| Northbridge  |
79  *  +-------+     | AMDZEN_DF_F0_CFG_ADDR_CTL: bus 133 |        | PCI 133/0/0  |
80  *                +------------------------------------+        +--------------+
81  *
82  * Note, the PCI buses used by the northbridges here are arbitrary. They do not
83  * reflect the actual values by hardware; however, the bus/device/function (BDF)
84  * of the data fabric accurately models hardware. All of the BDF values are in
85  * hex.
86  *
87  * Starting with the Rome generation of processors (Family 17h Model 30-3Fh),
88  * AMD has multiple northbridges that exist on a given die. All of these
89  * northbridges share the same data fabric and system management network port.
90  * From our perspective this means that some of the northbridge devices will be
91  * redundant and that we will no longer have a 1:1 mapping between the
92  * northbridge and the data fabric devices. Every data fabric will have a
93  * northbridge, but not every northbridge will have a data fabric device mapped.
94  * Because we're always trying to map from a die to a northbridge and not the
95  * reverse, the fact that there are extra northbridge devices hanging around
96  * that we don't know about shouldn't be a problem.
97  *
98  * -------------------------------
99  * Attach and Detach Complications
100  * -------------------------------
101  *
102  * Because we need to map different PCI devices together, this means that we
103  * have multiple dev_info_t structures that we need to manage. Each of these is
104  * independently attached and detached. While this is easily managed for attach,
105  * it is not for detach. Each of these devices is a 'stub'.
106  *
107  * Once a device has been detached it will only come back if we have an active
108  * minor node that will be accessed. This means that if they are detached,
109  * nothing would ever cause them to be reattached. The system also doesn't
110  * provide us a way or any guarantees around making sure that we're attached to
111  * all such devices before we detach. As a result, unfortunately, it's easier to
112  * basically have detach always fail.
113  *
114  * ---------------
115  * Exposed Devices
116  * ---------------
117  *
118  * Rather than try and have all of the different functions that could be
119  * provided by one driver, we instead have created a nexus driver that will
120  * itself try and load children. Children are all pseudo-device drivers that
121  * provide different pieces of functionality that use this.
122  *
123  * -------
124  * Locking
125  * -------
126  *
127  * The amdzen_data structure contains a single lock, azn_mutex. The various
128  * client functions are intended for direct children of our nexus, but have been
129  * designed in case someone else depends on this driver despite not being a
130  * child. Once a DF has been discovered, the set of entities inside of it
131  * (adf_nents, adf_ents[]) is considered static, constant data. This means that
132  * iterating over it in and of itself does not require locking; however, the
133  * discovery of the amd_df_t does. In addition, whenever performing register
134  * accesses to the DF or SMN, those require locking. This means that one must
135  * hold the lock in the following circumstances:
136  *
137  *   o Looking up DF structures
138  *   o Reading or writing to DF registers
139  *   o Reading or writing to SMN registers
140  *
141  * In general, it is preferred that the lock be held across an entire client
142  * operation if possible. The only time this becomes an issue are when we have
143  * callbacks into our callers (ala amdzen_c_df_iter()) as they will likely
144  * recursively call into us.
145  */
146 
147 #include <sys/modctl.h>
148 #include <sys/conf.h>
149 #include <sys/devops.h>
150 #include <sys/ddi.h>
151 #include <sys/sunddi.h>
152 #include <sys/pci.h>
153 #include <sys/sysmacros.h>
154 #include <sys/sunndi.h>
155 #include <sys/x86_archext.h>
156 #include <sys/cpuvar.h>
157 
158 #include <sys/amdzen/df.h>
159 #include "amdzen_client.h"
160 #include "amdzen.h"
161 
/*
 * Global driver soft state, referenced by all of the amdzen_c_* client
 * entry points below. NOTE(review): allocation/initialization is not in
 * this chunk; presumably performed during attach — confirm before relying
 * on it being non-NULL in new code paths.
 */
amdzen_t *amdzen_data;

/*
 * Array of northbridge IDs that we care about.
 */
static const uint16_t amdzen_nb_ids[] = {
	/* Family 17h Ryzen, Epyc Models 00h-0fh (Zen uarch) */
	0x1450,
	/* Family 17h Raven Ridge, Kestrel, Dali Models 10h-2fh (Zen uarch) */
	0x15d0,
	/* Family 17h/19h Rome, Milan, Matisse, Vermeer Zen 2/Zen 3 uarch */
	0x1480,
	/* Family 17h/19h Renoir, Cezanne, Van Gogh Zen 2/3 uarch */
	0x1630,
	/* Family 19h Genoa */
	0x14a4,
	/* Family 17h Mendocino, Family 19h Rembrandt */
	0x14b5,
	/* Family 19h Raphael */
	0x14d8
};

/*
 * Describes one pseudo-device child of this nexus. A pointer to an entry
 * is handed to the child via ddi_set_parent_data() in
 * amdzen_create_child().
 */
typedef struct {
	char *acd_name;			/* devinfo node name of the child */
	amdzen_child_t acd_addr;	/* child identifier */
} amdzen_child_data_t;

/*
 * The set of pseudo-device children that this nexus knows how to create.
 */
static const amdzen_child_data_t amdzen_children[] = {
	{ "smntemp", AMDZEN_C_SMNTEMP },
	{ "usmn", AMDZEN_C_USMN },
	{ "zen_udf", AMDZEN_C_ZEN_UDF },
	{ "zen_umc", AMDZEN_C_ZEN_UMC }
};
195 
196 static uint32_t
197 amdzen_stub_get32(amdzen_stub_t *stub, off_t reg)
198 {
199 	return (pci_config_get32(stub->azns_cfgspace, reg));
200 }
201 
202 static uint64_t
203 amdzen_stub_get64(amdzen_stub_t *stub, off_t reg)
204 {
205 	return (pci_config_get64(stub->azns_cfgspace, reg));
206 }
207 
208 static void
209 amdzen_stub_put32(amdzen_stub_t *stub, off_t reg, uint32_t val)
210 {
211 	pci_config_put32(stub->azns_cfgspace, reg, val);
212 }
213 
/*
 * Perform an indirect DF register access targeted at a specific instance,
 * using the FICAA/FICAD indirection register pair. The request (target
 * function, instance, register offset, and access width) is composed in the
 * FICAA register; the result is then read back through FICAD. When do_64 is
 * set the FICAD read is 64 bits wide, otherwise 32 bits (zero-extended into
 * the uint64_t return).
 *
 * The caller must hold azn_mutex across the FICAA write and FICAD read so
 * the two-step indirect access is not interleaved with another.
 */
static uint64_t
amdzen_df_read_regdef(amdzen_t *azn, amdzen_df_t *df, const df_reg_def_t def,
    uint8_t inst, boolean_t do_64)
{
	df_reg_def_t ficaa;
	df_reg_def_t ficad;
	uint32_t val = 0;
	/*
	 * NOTE(review): the revision is taken from DF 0, not from the df
	 * argument — this assumes all DFs in the system share a revision
	 * (see also the comment in amdzen_c_df_rev()).
	 */
	df_rev_t df_rev = azn->azn_dfs[0].adf_rev;

	VERIFY(MUTEX_HELD(&azn->azn_mutex));
	ASSERT3U(def.drd_gens & df_rev, ==, df_rev);
	/*
	 * These FICAA fields are set with the V2 accessors regardless of
	 * revision; the code relies on their layout being common to all
	 * supported DF revisions.
	 */
	val = DF_FICAA_V2_SET_TARG_INST(val, 1);
	val = DF_FICAA_V2_SET_FUNC(val, def.drd_func);
	val = DF_FICAA_V2_SET_INST(val, inst);
	val = DF_FICAA_V2_SET_64B(val, do_64 ? 1 : 0);

	/* Only the FICAA/FICAD locations differ between DFv2-3.5 and DFv4. */
	switch (df_rev) {
	case DF_REV_2:
	case DF_REV_3:
	case DF_REV_3P5:
		ficaa = DF_FICAA_V2;
		ficad = DF_FICAD_LO_V2;
		/*
		 * Both here and in the DFv4 case, the register ignores the
		 * lower 2 bits. That is we can only address and encode things
		 * in units of 4 bytes.
		 */
		val = DF_FICAA_V2_SET_REG(val, def.drd_reg >> 2);
		break;
	case DF_REV_4:
		ficaa = DF_FICAA_V4;
		ficad = DF_FICAD_LO_V4;
		val = DF_FICAA_V4_SET_REG(val, def.drd_reg >> 2);
		break;
	default:
		panic("encountered unexpected DF rev: %u", df_rev);
	}

	/* Arm the indirection, then read the data register back. */
	amdzen_stub_put32(df->adf_funcs[ficaa.drd_func], ficaa.drd_reg, val);
	if (do_64) {
		return (amdzen_stub_get64(df->adf_funcs[ficad.drd_func],
		    ficad.drd_reg));
	} else {
		return (amdzen_stub_get32(df->adf_funcs[ficad.drd_func],
		    ficad.drd_reg));
	}
}
261 
262 /*
263  * Perform a targeted 32-bit indirect read to a specific instance and function.
264  */
265 static uint32_t
266 amdzen_df_read32(amdzen_t *azn, amdzen_df_t *df, uint8_t inst,
267     const df_reg_def_t def)
268 {
269 	return (amdzen_df_read_regdef(azn, df, def, inst, B_FALSE));
270 }
271 
272 /*
273  * For a broadcast read, just go to the underlying PCI function and perform a
274  * read. At this point in time, we don't believe we need to use the FICAA/FICAD
275  * to access it (though it does have a broadcast mode).
276  */
277 static uint32_t
278 amdzen_df_read32_bcast(amdzen_t *azn, amdzen_df_t *df, const df_reg_def_t def)
279 {
280 	VERIFY(MUTEX_HELD(&azn->azn_mutex));
281 	return (amdzen_stub_get32(df->adf_funcs[def.drd_func], def.drd_reg));
282 }
283 
284 
285 static uint32_t
286 amdzen_smn_read32(amdzen_t *azn, amdzen_df_t *df, const smn_reg_t reg)
287 {
288 	VERIFY(MUTEX_HELD(&azn->azn_mutex));
289 	amdzen_stub_put32(df->adf_nb, AMDZEN_NB_SMN_ADDR, SMN_REG_ADDR(reg));
290 	return (amdzen_stub_get32(df->adf_nb, AMDZEN_NB_SMN_DATA));
291 }
292 
293 static void
294 amdzen_smn_write32(amdzen_t *azn, amdzen_df_t *df, const smn_reg_t reg,
295     const uint32_t val)
296 {
297 	VERIFY(MUTEX_HELD(&azn->azn_mutex));
298 	amdzen_stub_put32(df->adf_nb, AMDZEN_NB_SMN_ADDR, SMN_REG_ADDR(reg));
299 	amdzen_stub_put32(df->adf_nb, AMDZEN_NB_SMN_DATA, val);
300 }
301 
302 static amdzen_df_t *
303 amdzen_df_find(amdzen_t *azn, uint_t dfno)
304 {
305 	uint_t i;
306 
307 	ASSERT(MUTEX_HELD(&azn->azn_mutex));
308 	if (dfno >= azn->azn_ndfs) {
309 		return (NULL);
310 	}
311 
312 	for (i = 0; i < azn->azn_ndfs; i++) {
313 		amdzen_df_t *df = &azn->azn_dfs[i];
314 		if ((df->adf_flags & AMDZEN_DF_F_VALID) == 0) {
315 			continue;
316 		}
317 
318 		if (dfno == 0) {
319 			return (df);
320 		}
321 		dfno--;
322 	}
323 
324 	return (NULL);
325 }
326 
327 /*
328  * Client functions that are used by nexus children.
329  */
330 int
331 amdzen_c_smn_read32(uint_t dfno, const smn_reg_t reg, uint32_t *valp)
332 {
333 	amdzen_df_t *df;
334 	amdzen_t *azn = amdzen_data;
335 
336 	mutex_enter(&azn->azn_mutex);
337 	df = amdzen_df_find(azn, dfno);
338 	if (df == NULL) {
339 		mutex_exit(&azn->azn_mutex);
340 		return (ENOENT);
341 	}
342 
343 	if ((df->adf_flags & AMDZEN_DF_F_FOUND_NB) == 0) {
344 		mutex_exit(&azn->azn_mutex);
345 		return (ENXIO);
346 	}
347 
348 	*valp = amdzen_smn_read32(azn, df, reg);
349 	mutex_exit(&azn->azn_mutex);
350 	return (0);
351 }
352 
353 int
354 amdzen_c_smn_write32(uint_t dfno, const smn_reg_t reg, const uint32_t val)
355 {
356 	amdzen_df_t *df;
357 	amdzen_t *azn = amdzen_data;
358 
359 	mutex_enter(&azn->azn_mutex);
360 	df = amdzen_df_find(azn, dfno);
361 	if (df == NULL) {
362 		mutex_exit(&azn->azn_mutex);
363 		return (ENOENT);
364 	}
365 
366 	if ((df->adf_flags & AMDZEN_DF_F_FOUND_NB) == 0) {
367 		mutex_exit(&azn->azn_mutex);
368 		return (ENXIO);
369 	}
370 
371 	amdzen_smn_write32(azn, df, reg, val);
372 	mutex_exit(&azn->azn_mutex);
373 	return (0);
374 }
375 
376 
377 uint_t
378 amdzen_c_df_count(void)
379 {
380 	uint_t ret;
381 	amdzen_t *azn = amdzen_data;
382 
383 	mutex_enter(&azn->azn_mutex);
384 	ret = azn->azn_ndfs;
385 	mutex_exit(&azn->azn_mutex);
386 	return (ret);
387 }
388 
389 df_rev_t
390 amdzen_c_df_rev(void)
391 {
392 	amdzen_df_t *df;
393 	amdzen_t *azn = amdzen_data;
394 	df_rev_t rev;
395 
396 	/*
397 	 * Always use the first DF instance to determine what we're using. Our
398 	 * current assumption, which seems to generally be true, is that the
399 	 * given DF revisions are the same in a given system when the DFs are
400 	 * directly connected.
401 	 */
402 	mutex_enter(&azn->azn_mutex);
403 	df = amdzen_df_find(azn, 0);
404 	if (df == NULL) {
405 		rev = DF_REV_UNKNOWN;
406 	} else {
407 		rev = df->adf_rev;
408 	}
409 	mutex_exit(&azn->azn_mutex);
410 
411 	return (rev);
412 }
413 
414 int
415 amdzen_c_df_read32(uint_t dfno, uint8_t inst, const df_reg_def_t def,
416     uint32_t *valp)
417 {
418 	amdzen_df_t *df;
419 	amdzen_t *azn = amdzen_data;
420 
421 	mutex_enter(&azn->azn_mutex);
422 	df = amdzen_df_find(azn, dfno);
423 	if (df == NULL) {
424 		mutex_exit(&azn->azn_mutex);
425 		return (ENOENT);
426 	}
427 
428 	*valp = amdzen_df_read_regdef(azn, df, def, inst, B_FALSE);
429 	mutex_exit(&azn->azn_mutex);
430 
431 	return (0);
432 }
433 
434 int
435 amdzen_c_df_read64(uint_t dfno, uint8_t inst, const df_reg_def_t def,
436     uint64_t *valp)
437 {
438 	amdzen_df_t *df;
439 	amdzen_t *azn = amdzen_data;
440 
441 	mutex_enter(&azn->azn_mutex);
442 	df = amdzen_df_find(azn, dfno);
443 	if (df == NULL) {
444 		mutex_exit(&azn->azn_mutex);
445 		return (ENOENT);
446 	}
447 
448 	*valp = amdzen_df_read_regdef(azn, df, def, inst, B_TRUE);
449 	mutex_exit(&azn->azn_mutex);
450 
451 	return (0);
452 }
453 
/*
 * Iterate over every enabled DF entity of the requested logical type in DF
 * `dfno`, invoking `func` once per entity with its fabric ID and instance
 * ID. If the callback returns non-zero, iteration stops and that value is
 * returned to the caller. Returns ENOENT if the DF does not exist and
 * EINVAL for an unrecognized type.
 */
int
amdzen_c_df_iter(uint_t dfno, zen_df_type_t type, amdzen_c_iter_f func,
    void *arg)
{
	amdzen_df_t *df;
	amdzen_t *azn = amdzen_data;
	df_type_t df_type;
	uint8_t df_subtype;

	/*
	 * Unlike other calls here, we hold our lock only to find the DF here.
	 * The main reason for this is the nature of the callback function.
	 * Folks are iterating over instances so they can call back into us. If
	 * you look at the locking statement, the thing that is most volatile
	 * right here and what we need to protect is the DF itself and
	 * subsequent register accesses to it. The actual data about which
	 * entities exist is static and so once we have found a DF we should
	 * hopefully be in good shape as they only come, but don't go.
	 */
	mutex_enter(&azn->azn_mutex);
	df = amdzen_df_find(azn, dfno);
	if (df == NULL) {
		mutex_exit(&azn->azn_mutex);
		return (ENOENT);
	}
	mutex_exit(&azn->azn_mutex);

	/*
	 * Translate the caller-visible logical type into the hardware
	 * type/subtype pair we match entities against below.
	 */
	switch (type) {
	case ZEN_DF_TYPE_CS_UMC:
		df_type = DF_TYPE_CS;
		/*
		 * In the original Zeppelin DFv2 die there was no subtype field
		 * used for the CS. The UMC is the only type and has a subtype
		 * of zero.
		 */
		if (df->adf_rev != DF_REV_2) {
			df_subtype = DF_CS_SUBTYPE_UMC;
		} else {
			df_subtype = 0;
		}
		break;
	case ZEN_DF_TYPE_CCM_CPU:
		/*
		 * While the wording of the PPR is a little weird, the CCM still
		 * has subtype 0 in DFv4 systems; however, what's said to be for
		 * the CPU appears to apply to the ACM.
		 */
		df_type = DF_TYPE_CCM;
		df_subtype = 0;
		break;
	default:
		return (EINVAL);
	}

	for (uint_t i = 0; i < df->adf_nents; i++) {
		amdzen_df_ent_t *ent = &df->adf_ents[i];

		/*
		 * Some DF components are not considered enabled and therefore
		 * will end up having bogus values in their ID fields. If we do
		 * not have an enable flag set, we must skip this node.
		 */
		if ((ent->adfe_flags & AMDZEN_DFE_F_ENABLED) == 0)
			continue;

		if (ent->adfe_type == df_type &&
		    ent->adfe_subtype == df_subtype) {
			/* A non-zero callback return terminates iteration. */
			int ret = func(dfno, ent->adfe_fabric_id,
			    ent->adfe_inst_id, arg);
			if (ret != 0) {
				return (ret);
			}
		}
	}

	return (0);
}
531 
532 int
533 amdzen_c_df_fabric_decomp(df_fabric_decomp_t *decomp)
534 {
535 	const amdzen_df_t *df;
536 	amdzen_t *azn = amdzen_data;
537 
538 	mutex_enter(&azn->azn_mutex);
539 	df = amdzen_df_find(azn, 0);
540 	if (df == NULL) {
541 		mutex_exit(&azn->azn_mutex);
542 		return (ENOENT);
543 	}
544 
545 	*decomp = df->adf_decomp;
546 	mutex_exit(&azn->azn_mutex);
547 	return (0);
548 }
549 
550 static boolean_t
551 amdzen_create_child(amdzen_t *azn, const amdzen_child_data_t *acd)
552 {
553 	int ret;
554 	dev_info_t *child;
555 
556 	if (ndi_devi_alloc(azn->azn_dip, acd->acd_name,
557 	    (pnode_t)DEVI_SID_NODEID, &child) != NDI_SUCCESS) {
558 		dev_err(azn->azn_dip, CE_WARN, "!failed to allocate child "
559 		    "dip for %s", acd->acd_name);
560 		return (B_FALSE);
561 	}
562 
563 	ddi_set_parent_data(child, (void *)acd);
564 	if ((ret = ndi_devi_online(child, 0)) != NDI_SUCCESS) {
565 		dev_err(azn->azn_dip, CE_WARN, "!failed to online child "
566 		    "dip %s: %d", acd->acd_name, ret);
567 		return (B_FALSE);
568 	}
569 
570 	return (B_TRUE);
571 }
572 
573 static boolean_t
574 amdzen_map_dfs(amdzen_t *azn)
575 {
576 	amdzen_stub_t *stub;
577 
578 	ASSERT(MUTEX_HELD(&azn->azn_mutex));
579 
580 	for (stub = list_head(&azn->azn_df_stubs); stub != NULL;
581 	    stub = list_next(&azn->azn_df_stubs, stub)) {
582 		amdzen_df_t *df;
583 		uint_t dfno;
584 
585 		dfno = stub->azns_dev - AMDZEN_DF_FIRST_DEVICE;
586 		if (dfno > AMDZEN_MAX_DFS) {
587 			dev_err(stub->azns_dip, CE_WARN, "encountered df "
588 			    "device with illegal DF PCI b/d/f: 0x%x/%x/%x",
589 			    stub->azns_bus, stub->azns_dev, stub->azns_func);
590 			goto err;
591 		}
592 
593 		df = &azn->azn_dfs[dfno];
594 
595 		if (stub->azns_func >= AMDZEN_MAX_DF_FUNCS) {
596 			dev_err(stub->azns_dip, CE_WARN, "encountered df "
597 			    "device with illegal DF PCI b/d/f: 0x%x/%x/%x",
598 			    stub->azns_bus, stub->azns_dev, stub->azns_func);
599 			goto err;
600 		}
601 
602 		if (df->adf_funcs[stub->azns_func] != NULL) {
603 			dev_err(stub->azns_dip, CE_WARN, "encountered "
604 			    "duplicate df device with DF PCI b/d/f: 0x%x/%x/%x",
605 			    stub->azns_bus, stub->azns_dev, stub->azns_func);
606 			goto err;
607 		}
608 		df->adf_funcs[stub->azns_func] = stub;
609 	}
610 
611 	return (B_TRUE);
612 
613 err:
614 	azn->azn_flags |= AMDZEN_F_DEVICE_ERROR;
615 	return (B_FALSE);
616 }
617 
618 static boolean_t
619 amdzen_check_dfs(amdzen_t *azn)
620 {
621 	uint_t i;
622 	boolean_t ret = B_TRUE;
623 
624 	for (i = 0; i < AMDZEN_MAX_DFS; i++) {
625 		amdzen_df_t *df = &azn->azn_dfs[i];
626 		uint_t count = 0;
627 
628 		/*
629 		 * We require all platforms to have DFs functions 0-6. Not all
630 		 * platforms have DF function 7.
631 		 */
632 		for (uint_t func = 0; func < AMDZEN_MAX_DF_FUNCS - 1; func++) {
633 			if (df->adf_funcs[func] != NULL) {
634 				count++;
635 			}
636 		}
637 
638 		if (count == 0)
639 			continue;
640 
641 		if (count != 7) {
642 			ret = B_FALSE;
643 			dev_err(azn->azn_dip, CE_WARN, "df %u devices "
644 			    "incomplete", i);
645 		} else {
646 			df->adf_flags |= AMDZEN_DF_F_VALID;
647 			azn->azn_ndfs++;
648 		}
649 	}
650 
651 	return (ret);
652 }
653 
/*
 * Mapping from a contiguous index in the range [0, 0x2a] to the
 * discontinuous DF instance IDs used by Rome-style parts (see
 * amdzen_is_rome_style()). Note the gaps at 12-15 and 28-29, which are
 * holes in the hardware's instance ID space.
 */
static const uint8_t amdzen_df_rome_ids[0x2b] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
	44, 45, 46, 47, 48
};
659 
660 /*
661  * Check the first df entry to see if it belongs to Rome or Milan. If so, then
662  * it uses the disjoint ID space.
663  */
664 static boolean_t
665 amdzen_is_rome_style(uint_t id)
666 {
667 	return (id == 0x1490 || id == 0x1650);
668 }
669 
670 /*
671  * To be able to do most other things we want to do, we must first determine
672  * what revision of the DF (data fabric) that we're using.
673  *
674  * Snapshot the df version. This was added explicitly in DFv4.0, around the Zen
675  * 4 timeframe and allows us to tell apart different version of the DF register
676  * set, most usefully when various subtypes were added.
677  *
678  * Older versions can theoretically be told apart based on usage of reserved
679  * registers. We walk these in the following order, starting with the newest rev
680  * and walking backwards to tell things apart:
681  *
682  *   o v3.5 -> Check function 1, register 0x150. This was reserved prior
683  *             to this point. This is actually DF_FIDMASK0_V3P5. We are supposed
684  *             to check bits [7:0].
685  *
686  *   o v3.0 -> Check function 1, register 0x208. The low byte (7:0) was
687  *             changed to indicate a component mask. This is non-zero
688  *             in the 3.0 generation. This is actually DF_FIDMASK_V2.
689  *
 *   o v2.0 -> This is simply the remaining case when neither of the above
 *             matches. Presumably v1 wasn't part of the Zen generation.
692  *
693  * Because we don't know what version we are yet, we do not use the normal
694  * versioned register accesses which would check what DF version we are and
695  * would want to use the normal indirect register accesses (which also require
696  * us to know the version). We instead do direct broadcast reads.
697  */
/*
 * Determine the DF revision for this DF and record it (major, minor, and
 * normalized adf_rev) in the amdzen_df_t. See the block comment above for
 * the detection strategy: the version register is only meaningful on DFv4,
 * so older parts are distinguished by probing registers that were reserved
 * (read as zero in bits [7:0]) in earlier revisions. The order of the
 * probes below is therefore significant.
 */
static void
amdzen_determine_df_vers(amdzen_t *azn, amdzen_df_t *df)
{
	uint32_t val;
	df_reg_def_t rd = DF_FBICNT;

	/* DFv4 and later report their version directly in DF_FBICNT. */
	val = amdzen_stub_get32(df->adf_funcs[rd.drd_func], rd.drd_reg);
	df->adf_major = DF_FBICNT_V4_GET_MAJOR(val);
	df->adf_minor = DF_FBICNT_V4_GET_MINOR(val);
	if (df->adf_major == 0 && df->adf_minor == 0) {
		/*
		 * No explicit version: probe DF_FIDMASK0_V3P5 (reserved
		 * before v3.5), then DF_FIDMASK_V2 (low byte non-zero only
		 * from v3.0), else conclude v2.0.
		 */
		rd = DF_FIDMASK0_V3P5;
		val = amdzen_stub_get32(df->adf_funcs[rd.drd_func], rd.drd_reg);
		if (bitx32(val, 7, 0) != 0) {
			df->adf_major = 3;
			df->adf_minor = 5;
			df->adf_rev = DF_REV_3P5;
		} else {
			rd = DF_FIDMASK_V2;
			val = amdzen_stub_get32(df->adf_funcs[rd.drd_func],
			    rd.drd_reg);
			if (bitx32(val, 7, 0) != 0) {
				df->adf_major = 3;
				df->adf_minor = 0;
				df->adf_rev = DF_REV_3;
			} else {
				df->adf_major = 2;
				df->adf_minor = 0;
				df->adf_rev = DF_REV_2;
			}
		}
	} else if (df->adf_major == 4 && df->adf_minor == 0) {
		df->adf_rev = DF_REV_4;
	} else {
		/* A reported version we don't recognize. */
		df->adf_rev = DF_REV_UNKNOWN;
	}
}
734 
735 /*
736  * All of the different versions of the DF have different ways of getting at and
737  * answering the question of how do I break a fabric ID into a corresponding
738  * socket, die, and component. Importantly the goal here is to obtain, cache,
739  * and normalize:
740  *
741  *  o The DF System Configuration
742  *  o The various Mask registers
743  *  o The Node ID
744  */
/*
 * Read and cache the system configuration and fabric ID mask registers for
 * this DF's revision, and normalize them into the revision-independent
 * df_fabric_decomp_t (socket/die/node/component masks and shifts) stored in
 * adf_decomp. Panics if called with a revision that was not accepted by
 * amdzen_determine_df_vers().
 */
static void
amdzen_determine_fabric_decomp(amdzen_t *azn, amdzen_df_t *df)
{
	uint32_t mask;
	df_fabric_decomp_t *decomp = &df->adf_decomp;

	switch (df->adf_rev) {
	case DF_REV_2:
		df->adf_syscfg = amdzen_df_read32_bcast(azn, df, DF_SYSCFG_V2);
		/* DFv2 CPU and APU dies use different mask registers. */
		switch (DF_SYSCFG_V2_GET_MY_TYPE(df->adf_syscfg)) {
		case DF_DIE_TYPE_CPU:
			mask = amdzen_df_read32_bcast(azn, df,
			    DF_DIEMASK_CPU_V2);
			break;
		case DF_DIE_TYPE_APU:
			mask = amdzen_df_read32_bcast(azn, df,
			    DF_DIEMASK_APU_V2);
			break;
		default:
			panic("DF thinks we're not on a CPU!");
		}
		df->adf_mask0 = mask;

		/*
		 * DFv2 is a bit different in how the fabric mask register is
		 * phrased. Logically a fabric ID is broken into something that
		 * uniquely identifies a "node" (a particular die on a socket)
		 * and something that identifies a "component", e.g. a memory
		 * controller.
		 *
		 * Starting with DFv3, these registers logically called out how
		 * to separate the fabric ID first into a node and a component.
		 * Then the node was then broken down into a socket and die. In
		 * DFv2, there is no separate mask and shift of a node. Instead
		 * the socket and die are absolute offsets into the fabric ID
		 * rather than relative offsets into the node ID. As such, when
		 * we encounter DFv2, we fake up a node mask and shift and make
		 * it look like DFv3+.
		 */
		decomp->dfd_node_mask = DF_DIEMASK_V2_GET_SOCK_MASK(mask) |
		    DF_DIEMASK_V2_GET_DIE_MASK(mask);
		decomp->dfd_node_shift = DF_DIEMASK_V2_GET_DIE_SHIFT(mask);
		decomp->dfd_comp_mask = DF_DIEMASK_V2_GET_COMP_MASK(mask);
		decomp->dfd_comp_shift = 0;

		/*
		 * Rebase the absolute socket/die masks and shifts so they are
		 * relative to the synthesized node ID, matching DFv3+.
		 */
		decomp->dfd_sock_mask = DF_DIEMASK_V2_GET_SOCK_MASK(mask) >>
		    decomp->dfd_node_shift;
		decomp->dfd_die_mask = DF_DIEMASK_V2_GET_DIE_MASK(mask) >>
		    decomp->dfd_node_shift;
		decomp->dfd_sock_shift = DF_DIEMASK_V2_GET_SOCK_SHIFT(mask) -
		    decomp->dfd_node_shift;
		decomp->dfd_die_shift = DF_DIEMASK_V2_GET_DIE_SHIFT(mask) -
		    decomp->dfd_node_shift;
		ASSERT3U(decomp->dfd_die_shift, ==, 0);
		break;
	case DF_REV_3:
		df->adf_syscfg = amdzen_df_read32_bcast(azn, df, DF_SYSCFG_V3);
		df->adf_mask0 =  amdzen_df_read32_bcast(azn, df,
		    DF_FIDMASK0_V3);
		df->adf_mask1 =  amdzen_df_read32_bcast(azn, df,
		    DF_FIDMASK1_V3);

		decomp->dfd_sock_mask =
		    DF_FIDMASK1_V3_GET_SOCK_MASK(df->adf_mask1);
		decomp->dfd_sock_shift =
		    DF_FIDMASK1_V3_GET_SOCK_SHIFT(df->adf_mask1);
		decomp->dfd_die_mask =
		    DF_FIDMASK1_V3_GET_DIE_MASK(df->adf_mask1);
		decomp->dfd_die_shift = 0;
		decomp->dfd_node_mask =
		    DF_FIDMASK0_V3_GET_NODE_MASK(df->adf_mask0);
		decomp->dfd_node_shift =
		    DF_FIDMASK1_V3_GET_NODE_SHIFT(df->adf_mask1);
		decomp->dfd_comp_mask =
		    DF_FIDMASK0_V3_GET_COMP_MASK(df->adf_mask0);
		decomp->dfd_comp_shift = 0;
		break;
	case DF_REV_3P5:
		df->adf_syscfg = amdzen_df_read32_bcast(azn, df,
		    DF_SYSCFG_V3P5);
		df->adf_mask0 =  amdzen_df_read32_bcast(azn, df,
		    DF_FIDMASK0_V3P5);
		df->adf_mask1 =  amdzen_df_read32_bcast(azn, df,
		    DF_FIDMASK1_V3P5);
		df->adf_mask2 =  amdzen_df_read32_bcast(azn, df,
		    DF_FIDMASK2_V3P5);

		decomp->dfd_sock_mask =
		    DF_FIDMASK2_V3P5_GET_SOCK_MASK(df->adf_mask2);
		decomp->dfd_sock_shift =
		    DF_FIDMASK1_V3P5_GET_SOCK_SHIFT(df->adf_mask1);
		decomp->dfd_die_mask =
		    DF_FIDMASK2_V3P5_GET_DIE_MASK(df->adf_mask2);
		decomp->dfd_die_shift = 0;
		decomp->dfd_node_mask =
		    DF_FIDMASK0_V3P5_GET_NODE_MASK(df->adf_mask0);
		decomp->dfd_node_shift =
		    DF_FIDMASK1_V3P5_GET_NODE_SHIFT(df->adf_mask1);
		decomp->dfd_comp_mask =
		    DF_FIDMASK0_V3P5_GET_COMP_MASK(df->adf_mask0);
		decomp->dfd_comp_shift = 0;
		break;
	case DF_REV_4:
		df->adf_syscfg = amdzen_df_read32_bcast(azn, df, DF_SYSCFG_V4);
		df->adf_mask0 =  amdzen_df_read32_bcast(azn, df,
		    DF_FIDMASK0_V4);
		df->adf_mask1 =  amdzen_df_read32_bcast(azn, df,
		    DF_FIDMASK1_V4);
		df->adf_mask2 =  amdzen_df_read32_bcast(azn, df,
		    DF_FIDMASK2_V4);

		/*
		 * The DFv4 registers are at a different location in the DF;
		 * however, the actual layout of fields is the same as DFv3.5.
		 * This is why you see V3P5 below.
		 */
		decomp->dfd_sock_mask =
		    DF_FIDMASK2_V3P5_GET_SOCK_MASK(df->adf_mask2);
		decomp->dfd_sock_shift =
		    DF_FIDMASK1_V3P5_GET_SOCK_SHIFT(df->adf_mask1);
		decomp->dfd_die_mask =
		    DF_FIDMASK2_V3P5_GET_DIE_MASK(df->adf_mask2);
		decomp->dfd_die_shift = 0;
		decomp->dfd_node_mask =
		    DF_FIDMASK0_V3P5_GET_NODE_MASK(df->adf_mask0);
		decomp->dfd_node_shift =
		    DF_FIDMASK1_V3P5_GET_NODE_SHIFT(df->adf_mask1);
		decomp->dfd_comp_mask =
		    DF_FIDMASK0_V3P5_GET_COMP_MASK(df->adf_mask0);
		decomp->dfd_comp_shift = 0;
		break;
	default:
		panic("encountered suspicious, previously rejected DF "
		    "rev: 0x%x", df->adf_rev);
	}
}
881 
882 /*
883  * Initialize our knowledge about a given series of nodes on the data fabric.
884  */
885 static void
886 amdzen_setup_df(amdzen_t *azn, amdzen_df_t *df)
887 {
888 	uint_t i;
889 	uint32_t val;
890 
891 	amdzen_determine_df_vers(azn, df);
892 
893 	switch (df->adf_rev) {
894 	case DF_REV_2:
895 	case DF_REV_3:
896 	case DF_REV_3P5:
897 		val = amdzen_df_read32_bcast(azn, df, DF_CFG_ADDR_CTL_V2);
898 		break;
899 	case DF_REV_4:
900 		val = amdzen_df_read32_bcast(azn, df, DF_CFG_ADDR_CTL_V4);
901 		break;
902 	default:
903 		dev_err(azn->azn_dip, CE_WARN, "encountered unsupported DF "
904 		    "revision: 0x%x", df->adf_rev);
905 		return;
906 	}
907 	df->adf_nb_busno = DF_CFG_ADDR_CTL_GET_BUS_NUM(val);
908 	val = amdzen_df_read32_bcast(azn, df, DF_FBICNT);
909 	df->adf_nents = DF_FBICNT_GET_COUNT(val);
910 	if (df->adf_nents == 0)
911 		return;
912 	df->adf_ents = kmem_zalloc(sizeof (amdzen_df_ent_t) * df->adf_nents,
913 	    KM_SLEEP);
914 
915 	for (i = 0; i < df->adf_nents; i++) {
916 		amdzen_df_ent_t *dfe = &df->adf_ents[i];
917 		uint8_t inst = i;
918 
919 		/*
920 		 * Unfortunately, Rome uses a discontinuous instance ID pattern
921 		 * while everything else we can find uses a contiguous instance
922 		 * ID pattern.  This means that for Rome, we need to adjust the
923 		 * indexes that we iterate over, though the total number of
924 		 * entries is right. This was carried over into Milan, but not
925 		 * Genoa.
926 		 */
927 		if (amdzen_is_rome_style(df->adf_funcs[0]->azns_did)) {
928 			if (inst > ARRAY_SIZE(amdzen_df_rome_ids)) {
929 				dev_err(azn->azn_dip, CE_WARN, "Rome family "
930 				    "processor reported more ids than the PPR, "
931 				    "resetting %u to instance zero", inst);
932 				inst = 0;
933 			} else {
934 				inst = amdzen_df_rome_ids[inst];
935 			}
936 		}
937 
938 		dfe->adfe_drvid = inst;
939 		dfe->adfe_info0 = amdzen_df_read32(azn, df, inst, DF_FBIINFO0);
940 		dfe->adfe_info1 = amdzen_df_read32(azn, df, inst, DF_FBIINFO1);
941 		dfe->adfe_info2 = amdzen_df_read32(azn, df, inst, DF_FBIINFO2);
942 		dfe->adfe_info3 = amdzen_df_read32(azn, df, inst, DF_FBIINFO3);
943 
944 		dfe->adfe_type = DF_FBIINFO0_GET_TYPE(dfe->adfe_info0);
945 		dfe->adfe_subtype = DF_FBIINFO0_GET_SUBTYPE(dfe->adfe_info0);
946 
947 		/*
948 		 * The enabled flag was not present in Zen 1. Simulate it by
949 		 * checking for a non-zero register instead.
950 		 */
951 		if (DF_FBIINFO0_V3_GET_ENABLED(dfe->adfe_info0) ||
952 		    (df->adf_rev == DF_REV_2 && dfe->adfe_info0 != 0)) {
953 			dfe->adfe_flags |= AMDZEN_DFE_F_ENABLED;
954 		}
955 		if (DF_FBIINFO0_GET_HAS_MCA(dfe->adfe_info0)) {
956 			dfe->adfe_flags |= AMDZEN_DFE_F_MCA;
957 		}
958 		dfe->adfe_inst_id = DF_FBIINFO3_GET_INSTID(dfe->adfe_info3);
959 		switch (df->adf_rev) {
960 		case DF_REV_2:
961 			dfe->adfe_fabric_id =
962 			    DF_FBIINFO3_V2_GET_BLOCKID(dfe->adfe_info3);
963 			break;
964 		case DF_REV_3:
965 			dfe->adfe_fabric_id =
966 			    DF_FBIINFO3_V3_GET_BLOCKID(dfe->adfe_info3);
967 			break;
968 		case DF_REV_3P5:
969 			dfe->adfe_fabric_id =
970 			    DF_FBIINFO3_V3P5_GET_BLOCKID(dfe->adfe_info3);
971 			break;
972 		case DF_REV_4:
973 			dfe->adfe_fabric_id =
974 			    DF_FBIINFO3_V4_GET_BLOCKID(dfe->adfe_info3);
975 			break;
976 		default:
977 			panic("encountered suspicious, previously rejected DF "
978 			    "rev: 0x%x", df->adf_rev);
979 		}
980 	}
981 
982 	amdzen_determine_fabric_decomp(azn, df);
983 }
984 
985 static void
986 amdzen_find_nb(amdzen_t *azn, amdzen_df_t *df)
987 {
988 	amdzen_stub_t *stub;
989 
990 	for (stub = list_head(&azn->azn_nb_stubs); stub != NULL;
991 	    stub = list_next(&azn->azn_nb_stubs, stub)) {
992 		if (stub->azns_bus == df->adf_nb_busno) {
993 			df->adf_flags |= AMDZEN_DF_F_FOUND_NB;
994 			df->adf_nb = stub;
995 			return;
996 		}
997 	}
998 }
999 
/*
 * Taskq callback that runs once all expected stub drivers have attached. It
 * maps stubs to data fabrics, initializes each valid DF and pairs it with its
 * northbridge, and then enumerates our child devices. On completion it clears
 * the dispatched flag, marks attach complete, and wakes any waiters (e.g.
 * amdzen_detach()).
 */
static void
amdzen_nexus_init(void *arg)
{
	uint_t i;
	amdzen_t *azn = arg;

	/*
	 * First go through all of the stubs and assign the DF entries.
	 */
	mutex_enter(&azn->azn_mutex);
	if (!amdzen_map_dfs(azn) || !amdzen_check_dfs(azn)) {
		azn->azn_flags |= AMDZEN_F_MAP_ERROR;
		goto done;
	}

	for (i = 0; i < AMDZEN_MAX_DFS; i++) {
		amdzen_df_t *df = &azn->azn_dfs[i];

		/* Only fabrics that were populated during mapping count. */
		if ((df->adf_flags & AMDZEN_DF_F_VALID) == 0)
			continue;
		amdzen_setup_df(azn, df);
		amdzen_find_nb(azn, df);
	}

	/*
	 * Not all children may be installed. As such, we do not treat the
	 * failure of a child as fatal to the driver.
	 */
	mutex_exit(&azn->azn_mutex);
	for (i = 0; i < ARRAY_SIZE(amdzen_children); i++) {
		(void) amdzen_create_child(azn, &amdzen_children[i]);
	}
	mutex_enter(&azn->azn_mutex);

done:
	/* Publish completion and wake anyone waiting on the taskq id. */
	azn->azn_flags &= ~AMDZEN_F_ATTACH_DISPATCHED;
	azn->azn_flags |= AMDZEN_F_ATTACH_COMPLETE;
	azn->azn_taskqid = TASKQID_INVALID;
	cv_broadcast(&azn->azn_cv);
	mutex_exit(&azn->azn_mutex);
}
1041 
1042 static int
1043 amdzen_stub_scan_cb(dev_info_t *dip, void *arg)
1044 {
1045 	amdzen_t *azn = arg;
1046 	uint16_t vid, did;
1047 	int *regs;
1048 	uint_t nregs, i;
1049 	boolean_t match = B_FALSE;
1050 
1051 	if (dip == ddi_root_node()) {
1052 		return (DDI_WALK_CONTINUE);
1053 	}
1054 
1055 	/*
1056 	 * If a node in question is not a pci node, then we have no interest in
1057 	 * it as all the stubs that we care about are related to pci devices.
1058 	 */
1059 	if (strncmp("pci", ddi_get_name(dip), 3) != 0) {
1060 		return (DDI_WALK_PRUNECHILD);
1061 	}
1062 
1063 	/*
1064 	 * If we can't get a device or vendor ID and prove that this is an AMD
1065 	 * part, then we don't care about it.
1066 	 */
1067 	vid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1068 	    "vendor-id", PCI_EINVAL16);
1069 	did = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1070 	    "device-id", PCI_EINVAL16);
1071 	if (vid == PCI_EINVAL16 || did == PCI_EINVAL16) {
1072 		return (DDI_WALK_CONTINUE);
1073 	}
1074 
1075 	if (vid != AMDZEN_PCI_VID_AMD && vid != AMDZEN_PCI_VID_HYGON) {
1076 		return (DDI_WALK_CONTINUE);
1077 	}
1078 
1079 	for (i = 0; i < ARRAY_SIZE(amdzen_nb_ids); i++) {
1080 		if (amdzen_nb_ids[i] == did) {
1081 			match = B_TRUE;
1082 		}
1083 	}
1084 
1085 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1086 	    "reg", &regs, &nregs) != DDI_PROP_SUCCESS) {
1087 		return (DDI_WALK_CONTINUE);
1088 	}
1089 
1090 	if (nregs == 0) {
1091 		ddi_prop_free(regs);
1092 		return (DDI_WALK_CONTINUE);
1093 	}
1094 
1095 	if (PCI_REG_BUS_G(regs[0]) == AMDZEN_DF_BUSNO &&
1096 	    PCI_REG_DEV_G(regs[0]) >= AMDZEN_DF_FIRST_DEVICE) {
1097 		match = B_TRUE;
1098 	}
1099 
1100 	ddi_prop_free(regs);
1101 	if (match) {
1102 		mutex_enter(&azn->azn_mutex);
1103 		azn->azn_nscanned++;
1104 		mutex_exit(&azn->azn_mutex);
1105 	}
1106 
1107 	return (DDI_WALK_CONTINUE);
1108 }
1109 
/*
 * Taskq callback, dispatched from amdzen_attach(), that walks the whole
 * device tree counting the stub devices we expect. If none were found, the
 * platform is unsupported. If every counted stub has already attached, we
 * dispatch nexus initialization right away; otherwise amdzen_attach_stub()
 * will dispatch it when the last stub arrives.
 */
static void
amdzen_stub_scan(void *arg)
{
	amdzen_t *azn = arg;

	mutex_enter(&azn->azn_mutex);
	azn->azn_nscanned = 0;
	mutex_exit(&azn->azn_mutex);

	/* The walk runs unlocked; the callback takes the mutex per hit. */
	ddi_walk_devs(ddi_root_node(), amdzen_stub_scan_cb, azn);

	mutex_enter(&azn->azn_mutex);
	azn->azn_flags &= ~AMDZEN_F_SCAN_DISPATCHED;
	azn->azn_flags |= AMDZEN_F_SCAN_COMPLETE;

	if (azn->azn_nscanned == 0) {
		/* Nothing matched: this is not hardware we support. */
		azn->azn_flags |= AMDZEN_F_UNSUPPORTED;
		azn->azn_taskqid = TASKQID_INVALID;
		cv_broadcast(&azn->azn_cv);
	} else if (azn->azn_npresent == azn->azn_nscanned) {
		azn->azn_flags |= AMDZEN_F_ATTACH_DISPATCHED;
		azn->azn_taskqid = taskq_dispatch(system_taskq,
		    amdzen_nexus_init, azn, TQ_SLEEP);
	}
	mutex_exit(&azn->azn_mutex);
}
1136 
1137 /*
1138  * Unfortunately we can't really let the stubs detach as we may need them to be
1139  * available for client operations. We may be able to improve this if we know
1140  * that the actual nexus is going away. However, as long as it's active, we need
1141  * all the stubs.
1142  */
1143 int
1144 amdzen_detach_stub(dev_info_t *dip, ddi_detach_cmd_t cmd)
1145 {
1146 	if (cmd == DDI_SUSPEND) {
1147 		return (DDI_SUCCESS);
1148 	}
1149 
1150 	return (DDI_FAILURE);
1151 }
1152 
/*
 * Common attach entry point for the per-device stub drivers. We verify that
 * the device is one we expect (either a northbridge by device ID or a data
 * fabric function by bus/device location), set up config space access, and
 * register the stub with the nexus. If this was the last stub the scan was
 * waiting for, we dispatch amdzen_nexus_init() ourselves.
 */
int
amdzen_attach_stub(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int *regs, reg;
	uint_t nregs, i;
	uint16_t vid, did;
	amdzen_stub_t *stub;
	amdzen_t *azn = amdzen_data;
	boolean_t valid = B_FALSE;
	boolean_t nb = B_FALSE;

	if (cmd == DDI_RESUME) {
		return (DDI_SUCCESS);
	} else if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}

	/*
	 * Make sure that the stub that we've been asked to attach is a pci type
	 * device. If not, then there is no reason for us to proceed.
	 */
	if (strncmp("pci", ddi_get_name(dip), 3) != 0) {
		dev_err(dip, CE_WARN, "asked to attach a bad AMD Zen nexus "
		    "stub: %s", ddi_get_name(dip));
		return (DDI_FAILURE);
	}
	vid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", PCI_EINVAL16);
	did = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", PCI_EINVAL16);
	if (vid == PCI_EINVAL16 || did == PCI_EINVAL16) {
		dev_err(dip, CE_WARN, "failed to get PCI ID properties");
		return (DDI_FAILURE);
	}

	if (vid != AMDZEN_PCI_VID_AMD && vid != AMDZEN_PCI_VID_HYGON) {
		dev_err(dip, CE_WARN, "expected vendor ID (0x%x), found 0x%x",
		    cpuid_getvendor(CPU) == X86_VENDOR_HYGON ?
		    AMDZEN_PCI_VID_HYGON : AMDZEN_PCI_VID_AMD, vid);
		return (DDI_FAILURE);
	}

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", &regs, &nregs) != DDI_PROP_SUCCESS) {
		dev_err(dip, CE_WARN, "failed to get 'reg' property");
		return (DDI_FAILURE);
	}

	if (nregs == 0) {
		ddi_prop_free(regs);
		dev_err(dip, CE_WARN, "missing 'reg' property values");
		return (DDI_FAILURE);
	}
	/* Only the first 'reg' entry (the config-space address) matters. */
	reg = *regs;
	ddi_prop_free(regs);

	/* A northbridge is identified by its device ID being in our table. */
	for (i = 0; i < ARRAY_SIZE(amdzen_nb_ids); i++) {
		if (amdzen_nb_ids[i] == did) {
			valid = B_TRUE;
			nb = B_TRUE;
		}
	}

	/*
	 * A data fabric function is identified by its bus/device location
	 * rather than by device ID.
	 */
	if (!valid && PCI_REG_BUS_G(reg) == AMDZEN_DF_BUSNO &&
	    PCI_REG_DEV_G(reg) >= AMDZEN_DF_FIRST_DEVICE) {
		valid = B_TRUE;
		nb = B_FALSE;
	}

	if (!valid) {
		dev_err(dip, CE_WARN, "device %s didn't match the nexus list",
		    ddi_get_name(dip));
		return (DDI_FAILURE);
	}

	stub = kmem_alloc(sizeof (amdzen_stub_t), KM_SLEEP);
	if (pci_config_setup(dip, &stub->azns_cfgspace) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "failed to set up config space");
		kmem_free(stub, sizeof (amdzen_stub_t));
		return (DDI_FAILURE);
	}

	stub->azns_dip = dip;
	stub->azns_vid = vid;
	stub->azns_did = did;
	stub->azns_bus = PCI_REG_BUS_G(reg);
	stub->azns_dev = PCI_REG_DEV_G(reg);
	stub->azns_func = PCI_REG_FUNC_G(reg);
	ddi_set_driver_private(dip, stub);

	mutex_enter(&azn->azn_mutex);
	azn->azn_npresent++;
	if (nb) {
		list_insert_tail(&azn->azn_nb_stubs, stub);
	} else {
		list_insert_tail(&azn->azn_df_stubs, stub);
	}

	/*
	 * If the tree scan already finished and this stub was the last one it
	 * counted, kick off nexus initialization from here.
	 */
	if ((azn->azn_flags & AMDZEN_F_TASKQ_MASK) == AMDZEN_F_SCAN_COMPLETE &&
	    azn->azn_nscanned == azn->azn_npresent) {
		azn->azn_flags |= AMDZEN_F_ATTACH_DISPATCHED;
		azn->azn_taskqid = taskq_dispatch(system_taskq,
		    amdzen_nexus_init, azn, TQ_SLEEP);
	}
	mutex_exit(&azn->azn_mutex);

	return (DDI_SUCCESS);
}
1261 
1262 static int
1263 amdzen_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
1264     void *arg, void *result)
1265 {
1266 	char buf[32];
1267 	dev_info_t *child;
1268 	const amdzen_child_data_t *acd;
1269 
1270 	switch (ctlop) {
1271 	case DDI_CTLOPS_REPORTDEV:
1272 		if (rdip == NULL) {
1273 			return (DDI_FAILURE);
1274 		}
1275 		cmn_err(CE_CONT, "amdzen nexus: %s@%s, %s%d\n",
1276 		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
1277 		    ddi_driver_name(rdip), ddi_get_instance(rdip));
1278 		break;
1279 	case DDI_CTLOPS_INITCHILD:
1280 		child = arg;
1281 		if (child == NULL) {
1282 			dev_err(dip, CE_WARN, "!no child passed for "
1283 			    "DDI_CTLOPS_INITCHILD");
1284 		}
1285 
1286 		acd = ddi_get_parent_data(child);
1287 		if (acd == NULL) {
1288 			dev_err(dip, CE_WARN, "!missing child parent data");
1289 			return (DDI_FAILURE);
1290 		}
1291 
1292 		if (snprintf(buf, sizeof (buf), "%d", acd->acd_addr) >=
1293 		    sizeof (buf)) {
1294 			dev_err(dip, CE_WARN, "!failed to construct device "
1295 			    "addr due to overflow");
1296 			return (DDI_FAILURE);
1297 		}
1298 
1299 		ddi_set_name_addr(child, buf);
1300 		break;
1301 	case DDI_CTLOPS_UNINITCHILD:
1302 		child = arg;
1303 		if (child == NULL) {
1304 			dev_err(dip, CE_WARN, "!no child passed for "
1305 			    "DDI_CTLOPS_UNINITCHILD");
1306 		}
1307 
1308 		ddi_set_name_addr(child, NULL);
1309 		break;
1310 	default:
1311 		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
1312 	}
1313 	return (DDI_SUCCESS);
1314 }
1315 
1316 static int
1317 amdzen_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1318 {
1319 	amdzen_t *azn = amdzen_data;
1320 
1321 	if (cmd == DDI_RESUME) {
1322 		return (DDI_SUCCESS);
1323 	} else if (cmd != DDI_ATTACH) {
1324 		return (DDI_FAILURE);
1325 	}
1326 
1327 	mutex_enter(&azn->azn_mutex);
1328 	if (azn->azn_dip != NULL) {
1329 		dev_err(dip, CE_WARN, "driver is already attached!");
1330 		mutex_exit(&azn->azn_mutex);
1331 		return (DDI_FAILURE);
1332 	}
1333 
1334 	azn->azn_dip = dip;
1335 	azn->azn_taskqid = taskq_dispatch(system_taskq, amdzen_stub_scan,
1336 	    azn, TQ_SLEEP);
1337 	azn->azn_flags |= AMDZEN_F_SCAN_DISPATCHED;
1338 	mutex_exit(&azn->azn_mutex);
1339 
1340 	return (DDI_SUCCESS);
1341 }
1342 
1343 static int
1344 amdzen_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1345 {
1346 	amdzen_t *azn = amdzen_data;
1347 
1348 	if (cmd == DDI_SUSPEND) {
1349 		return (DDI_SUCCESS);
1350 	} else if (cmd != DDI_DETACH) {
1351 		return (DDI_FAILURE);
1352 	}
1353 
1354 	mutex_enter(&azn->azn_mutex);
1355 	while (azn->azn_taskqid != TASKQID_INVALID) {
1356 		cv_wait(&azn->azn_cv, &azn->azn_mutex);
1357 	}
1358 
1359 	/*
1360 	 * If we've attached any stub drivers, e.g. this platform is important
1361 	 * for us, then we fail detach.
1362 	 */
1363 	if (!list_is_empty(&azn->azn_df_stubs) ||
1364 	    !list_is_empty(&azn->azn_nb_stubs)) {
1365 		mutex_exit(&azn->azn_mutex);
1366 		return (DDI_FAILURE);
1367 	}
1368 
1369 	azn->azn_dip = NULL;
1370 	mutex_exit(&azn->azn_mutex);
1371 
1372 	return (DDI_SUCCESS);
1373 }
1374 
1375 static void
1376 amdzen_free(void)
1377 {
1378 	if (amdzen_data == NULL) {
1379 		return;
1380 	}
1381 
1382 	VERIFY(list_is_empty(&amdzen_data->azn_df_stubs));
1383 	list_destroy(&amdzen_data->azn_df_stubs);
1384 	VERIFY(list_is_empty(&amdzen_data->azn_nb_stubs));
1385 	list_destroy(&amdzen_data->azn_nb_stubs);
1386 	cv_destroy(&amdzen_data->azn_cv);
1387 	mutex_destroy(&amdzen_data->azn_mutex);
1388 	kmem_free(amdzen_data, sizeof (amdzen_t));
1389 	amdzen_data = NULL;
1390 }
1391 
1392 static void
1393 amdzen_alloc(void)
1394 {
1395 	amdzen_data = kmem_zalloc(sizeof (amdzen_t), KM_SLEEP);
1396 	mutex_init(&amdzen_data->azn_mutex, NULL, MUTEX_DRIVER, NULL);
1397 	list_create(&amdzen_data->azn_df_stubs, sizeof (amdzen_stub_t),
1398 	    offsetof(amdzen_stub_t, azns_link));
1399 	list_create(&amdzen_data->azn_nb_stubs, sizeof (amdzen_stub_t),
1400 	    offsetof(amdzen_stub_t, azns_link));
1401 	cv_init(&amdzen_data->azn_cv, NULL, CV_DRIVER, NULL);
1402 }
1403 
/*
 * Bus operations for the nexus. We are not a DMA transport, so all DMA entry
 * points are the standard ddi_no_dma_* stubs; the interesting entry point is
 * bus_ctl, which handles child naming and reporting.
 */
struct bus_ops amdzen_bus_ops = {
	.busops_rev = BUSO_REV,
	.bus_map = nullbusmap,
	.bus_dma_map = ddi_no_dma_map,
	.bus_dma_allochdl = ddi_no_dma_allochdl,
	.bus_dma_freehdl = ddi_no_dma_freehdl,
	.bus_dma_bindhdl = ddi_no_dma_bindhdl,
	.bus_dma_unbindhdl = ddi_no_dma_unbindhdl,
	.bus_dma_flush = ddi_no_dma_flush,
	.bus_dma_win = ddi_no_dma_win,
	.bus_dma_ctl = ddi_no_dma_mctl,
	.bus_prop_op = ddi_bus_prop_op,
	.bus_ctl = amdzen_bus_ctl
};
1418 
/*
 * Device operations for the nexus driver itself. No character/block entry
 * points; quiesce is not needed as the driver performs no DMA of its own.
 */
static struct dev_ops amdzen_dev_ops = {
	.devo_rev = DEVO_REV,
	.devo_refcnt = 0,
	.devo_getinfo = nodev,
	.devo_identify = nulldev,
	.devo_probe = nulldev,
	.devo_attach = amdzen_attach,
	.devo_detach = amdzen_detach,
	.devo_reset = nodev,
	.devo_quiesce = ddi_quiesce_not_needed,
	.devo_bus_ops = &amdzen_bus_ops
};
1431 
/* Module linkage: a single driver module. */
static struct modldrv amdzen_modldrv = {
	.drv_modops = &mod_driverops,
	.drv_linkinfo = "AMD Zen Nexus Driver",
	.drv_dev_ops = &amdzen_dev_ops
};

static struct modlinkage amdzen_modlinkage = {
	.ml_rev = MODREV_1,
	.ml_linkage = { &amdzen_modldrv, NULL }
};
1442 
/*
 * Module load entry point. We refuse to load at all on systems that are not
 * AMD or Hygon, since none of the hardware we drive can exist there.
 */
int
_init(void)
{
	int ret;

	if (cpuid_getvendor(CPU) != X86_VENDOR_AMD &&
	    cpuid_getvendor(CPU) != X86_VENDOR_HYGON) {
		return (ENOTSUP);
	}

	if ((ret = mod_install(&amdzen_modlinkage)) == 0) {
		/*
		 * NOTE(review): this assumes amdzen_attach() cannot run
		 * before amdzen_alloc() completes after mod_install()
		 * returns — confirm against the module framework's attach
		 * ordering.
		 */
		amdzen_alloc();
	}

	return (ret);
}
1459 
/* Standard module information entry point. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&amdzen_modlinkage, modinfop));
}
1465 
1466 int
1467 _fini(void)
1468 {
1469 	int ret;
1470 
1471 	if ((ret = mod_remove(&amdzen_modlinkage)) == 0) {
1472 		amdzen_free();
1473 	}
1474 
1475 	return (ret);
1476 }
1477