/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2022 Oxide Computer Company
 */

#ifndef _SYS_UMC_H
#define	_SYS_UMC_H

#include <sys/bitext.h>
#include <sys/amdzen/smn.h>

/*
 * Various register definitions for accessing the AMD Unified Memory Controller
 * (UMC) over SMN (the system management network). Note that the SMN exists
 * independently in each die and must be accessed through the appropriate
 * IOHC.
 *
 * There are effectively four different revisions of the UMC that we know about
 * and support querying:
 *
 *   o DDR4 capable APUs
 *   o DDR4 capable CPUs
 *   o DDR5 capable APUs
 *   o DDR5 capable CPUs
 *
 * In general, for a given revision and generation of a controller (DDR4 vs.
 * DDR5), all of the address layouts are the same whether it is for an APU or a
 * CPU. The main difference is generally in the number of features. For example,
 * most APUs may not support the same rank multiplication bits and related
 * features in a device. However, unlike the DF where everything changes, the
 * main difference within a generation is just which bits are implemented. This
 * makes it much easier to define UMC information.
 *
 * Between DDR4 and DDR5 based devices, the register locations have shifted;
 * however, generally speaking, the registers themselves are actually the same.
 * Registers here, similar to the DF, have a common form:
 *
 * UMC_<reg name>_<vers>
 *
 * Here, <reg name> would be something like 'BASE' for the UMC::CH::BaseAddr
 * register. <vers> is one of DDR4 or DDR5. When the same register is supported
 * at the same address between versions, then <vers> is elided.
 *
 * For fields inside of these registers, everything follows the same pattern as
 * <sys/amdzen/df.h>, which is:
 *
 * UMC_<reg name>_<vers>_GET_<field>
 *
 * Note, <vers> will be elided if the register is the same between the DDR4 and
 * DDR5 versions.
 *
 * Finally, a cautionary note. While the DF provides a way for us to determine
 * what version something is, we have not determined a way to programmatically
 * determine what something supports outside of making notes based on the
 * family, model, and stepping CPUID information. Unfortunately, you must look
 * to the documentation and find what you need in the PPR (processor
 * programming reference).
 */

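/*
 * For example, UMC_MASK_DDR4(u, i) below names the DDR4 form of
 * UMC::CH::AddrMask for UMC instance 'u' and register instance 'i', while
 * UMC_MASK_GET_ADDR(r) extracts the address field from a value read from
 * either the DDR4 or DDR5 form, as the field is identical in both.
 * UMC_BASE(u, i) has no <vers> at all, because UMC::CH::BaseAddr lives at the
 * same address in both generations.
 */
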
#ifdef __cplusplus
extern "C" {
#endif

/*
 * UMC Channel registers. These are in SMN Space. DDR4 and DDR5 based UMCs share
 * the same base address, somewhat surprisingly. This constructs the appropriate
 * offset and ensures that a caller doesn't exceed the number of known instances
 * of the register.  See smn.h for additional details on SMN addressing.  All
 * UMC registers are 32 bits wide; we check for violations.
 */

static inline smn_reg_t
amdzen_umc_smn_reg(const uint8_t umcno, const smn_reg_def_t def,
    const uint16_t reginst)
{
	const uint32_t APERTURE_BASE = 0x50000;
	const uint32_t APERTURE_MASK = 0xffffe000;

	const uint32_t umc32 = (const uint32_t)umcno;
	const uint32_t reginst32 = (const uint32_t)reginst;

	const uint32_t stride = (def.srd_stride == 0) ? 4 : def.srd_stride;
	const uint32_t nents = (def.srd_nents == 0) ? 1 :
	    (const uint32_t)def.srd_nents;

	ASSERT0(def.srd_size);
	ASSERT3S(def.srd_unit, ==, SMN_UNIT_UMC);
	ASSERT0(def.srd_reg & APERTURE_MASK);
	ASSERT3U(umc32, <, 12);
	ASSERT3U(nents, >, reginst32);

	const uint32_t aperture_off = umc32 << 20;
	ASSERT3U(aperture_off, <=, UINT32_MAX - APERTURE_BASE);

	const uint32_t aperture = APERTURE_BASE + aperture_off;
	ASSERT0(aperture & ~APERTURE_MASK);

	const uint32_t reg = def.srd_reg + reginst32 * stride;
	ASSERT0(reg & APERTURE_MASK);

	return (SMN_MAKE_REG(aperture + reg));
}

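/*
 * For example, using D_UMC_BASE below, amdzen_umc_smn_reg(1, D_UMC_BASE, 2)
 * computes an aperture of 0x50000 + (1 << 20) = 0x150000 and a register offset
 * of 0x00 + 2 * 4 = 0x8, giving a final SMN address of 0x150008.
 */
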
/*
 * UMC::CH::BaseAddr, UMC::CH::BaseAddrSec -- determines the base address used
 * to match a chip select. Instances 0/1 always refer to DIMM 0, while
 * instances 2/3 always refer to DIMM 1.
 */
/*CSTYLED*/
#define	D_UMC_BASE	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x00,	\
	.srd_nents = 4	\
}
/*CSTYLED*/
#define	D_UMC_BASE_SEC	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x10,	\
	.srd_nents = 4	\
}
#define	UMC_BASE(u, i)		amdzen_umc_smn_reg(u, D_UMC_BASE, i)
#define	UMC_BASE_SEC(u, i)	amdzen_umc_smn_reg(u, D_UMC_BASE_SEC, i)
#define	UMC_BASE_GET_ADDR(r)	bitx32(r, 31, 1)
#define	UMC_BASE_ADDR_SHIFT	9
#define	UMC_BASE_GET_EN(r)	bitx32(r, 0, 0)

/*
 * UMC::BaseAddrExt, UMC::BaseAddrSecExt -- The first of several extensions to
 * registers that allow more address bits. Note, only present in some DDR5
 * capable SoCs.
 */
/*CSTYLED*/
#define	D_UMC_BASE_EXT_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb00,	\
	.srd_nents = 4	\
}
/*CSTYLED*/
#define	D_UMC_BASE_EXT_SEC_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb10,	\
	.srd_nents = 4	\
}
#define	UMC_BASE_EXT_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_BASE_EXT_DDR5, i)
#define	UMC_BASE_EXT_SEC_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_BASE_EXT_SEC_DDR5, i)
#define	UMC_BASE_EXT_GET_ADDR(r)	bitx32(r, 7, 0)
#define	UMC_BASE_EXT_ADDR_SHIFT		40

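/*
 * A chip-select's normalized base address is the ADDR field shifted into
 * place; on DDR5 SoCs that implement the extended register, the extension
 * supplies the bits above bit 39. As an illustrative sketch (assuming 'base'
 * and 'ext' hold the raw register values and the enable bit has already been
 * checked):
 *
 *	uint64_t addr = (uint64_t)UMC_BASE_GET_ADDR(base) << UMC_BASE_ADDR_SHIFT;
 *	addr |= (uint64_t)UMC_BASE_EXT_GET_ADDR(ext) << UMC_BASE_EXT_ADDR_SHIFT;
 */
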
/*
 * UMC::CH::AddrMask, UMC::CH::AddrMaskSec -- This register is used to compare
 * the incoming address to see if it matches the base. Tweaking what is used
 * for the match is often part of the interleaving strategy.
 */
/*CSTYLED*/
#define	D_UMC_MASK_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x20,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_MASK_SEC_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x28,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_MASK_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x20,	\
	.srd_nents = 4	\
}
/*CSTYLED*/
#define	D_UMC_MASK_SEC_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x30,	\
	.srd_nents = 4	\
}
#define	UMC_MASK_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_DDR4, i)
#define	UMC_MASK_SEC_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_SEC_DDR4, i)
#define	UMC_MASK_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_DDR5, i)
#define	UMC_MASK_SEC_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_SEC_DDR5, i)
#define	UMC_MASK_GET_ADDR(r)	bitx32(r, 31, 1)
#define	UMC_MASK_ADDR_SHIFT	9

/*
 * UMC::AddrMaskExt, UMC::AddrMaskSecExt -- Extended mask addresses.
 */
/*CSTYLED*/
#define	D_UMC_MASK_EXT_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb20,	\
	.srd_nents = 4	\
}
/*CSTYLED*/
#define	D_UMC_MASK_EXT_SEC_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb30,	\
	.srd_nents = 4	\
}
#define	UMC_MASK_EXT_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_EXT_DDR5, i)
#define	UMC_MASK_EXT_SEC_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_MASK_EXT_SEC_DDR5, i)
#define	UMC_MASK_EXT_GET_ADDR(r)	bitx32(r, 7, 0)
#define	UMC_MASK_EXT_ADDR_SHIFT		40

/*
 * UMC::CH::AddrCfg -- This register contains a number of fields that describe
 * how the address is actually used, one instance per DIMM. Note, not all
 * members are valid for all classes of DIMMs. It's worth calling out that the
 * total number of banks value here describes the total number of banks on the
 * entire chip, i.e. it is bank groups * banks/group. Therefore, to determine
 * the number of banks per group you must subtract the number of bank group
 * bits from the total number of bank bits (see the sketch after the field
 * definitions below).
 */
/*CSTYLED*/
#define	D_UMC_ADDRCFG_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x30,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_ADDRCFG_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x40,	\
	.srd_nents = 4	\
}
#define	UMC_ADDRCFG_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_ADDRCFG_DDR4, i)
#define	UMC_ADDRCFG_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_ADDRCFG_DDR5, i)
#define	UMC_ADDRCFG_GET_NBANK_BITS(r)		bitx32(r, 21, 20)
#define	UMC_ADDRCFG_NBANK_BITS_BASE		3
#define	UMC_ADDRCFG_GET_NCOL_BITS(r)		bitx32(r, 19, 16)
#define	UMC_ADDRCFG_NCOL_BITS_BASE		5
#define	UMC_ADDRCFG_GET_NROW_BITS_LO(r)		bitx32(r, 11, 8)
#define	UMC_ADDRCFG_NROW_BITS_LO_BASE		10
#define	UMC_ADDRCFG_GET_NBANKGRP_BITS(r)	bitx32(r, 3, 2)

#define	UMC_ADDRCFG_DDR4_GET_NROW_BITS_HI(r)	bitx32(r, 15, 12)
#define	UMC_ADDRCFG_DDR4_GET_NRM_BITS(r)	bitx32(r, 5, 4)
#define	UMC_ADDRCFG_DDR5_GET_CSXOR(r)		bitx32(r, 31, 30)
#define	UMC_ADDRCFG_DDR5_GET_NRM_BITS(r)	bitx32(r, 6, 4)

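/*
 * As an illustrative sketch of the note above (assuming 'val' holds a raw
 * UMC::CH::AddrCfg value), the number of banks per bank group would be:
 *
 *	nbank_bits = UMC_ADDRCFG_GET_NBANK_BITS(val) + UMC_ADDRCFG_NBANK_BITS_BASE;
 *	nbankgrp_bits = UMC_ADDRCFG_GET_NBANKGRP_BITS(val);
 *	banks_per_group = 1U << (nbank_bits - nbankgrp_bits);
 */
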
/*
 * UMC::CH::AddrSel -- This register is used to program how the actual bits in
 * the normalized address map to the row and bank. While the bank fields can
 * select which bits in the normalized address are used to construct the bank
 * number, the row bits are contiguous from the starting bit (see the example
 * after the field definitions below).
 */
/*CSTYLED*/
#define	D_UMC_ADDRSEL_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x40,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_ADDRSEL_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x50,	\
	.srd_nents = 4	\
}
#define	UMC_ADDRSEL_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_ADDRSEL_DDR4, i)
#define	UMC_ADDRSEL_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_ADDRSEL_DDR5, i)
#define	UMC_ADDRSEL_GET_ROW_LO(r)	bitx32(r, 27, 24)
#define	UMC_ADDRSEL_ROW_LO_BASE		12
#define	UMC_ADDRSEL_GET_BANK4(r)	bitx32(r, 19, 16)
#define	UMC_ADDRSEL_GET_BANK3(r)	bitx32(r, 15, 12)
#define	UMC_ADDRSEL_GET_BANK2(r)	bitx32(r, 11, 8)
#define	UMC_ADDRSEL_GET_BANK1(r)	bitx32(r, 7, 4)
#define	UMC_ADDRSEL_GET_BANK0(r)	bitx32(r, 3, 0)
#define	UMC_ADDRSEL_BANK_BASE		5

#define	UMC_ADDRSEL_DDR4_GET_ROW_HI(r)	bitx32(r, 31, 28)
#define	UMC_ADDRSEL_DDR4_ROW_HI_BASE	24

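/*
 * For instance (an illustrative sketch, not taken from the PPR): if
 * UMC_ADDRSEL_GET_BANK0(val) returns 2, then bank bit 0 would come from
 * normalized address bit 2 + UMC_ADDRSEL_BANK_BASE = 7. Likewise, the low row
 * bits would start at normalized address bit UMC_ADDRSEL_GET_ROW_LO(val) +
 * UMC_ADDRSEL_ROW_LO_BASE and proceed contiguously upwards.
 */
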
/*
 * UMC::CH::ColSelLo, UMC::CH::ColSelHi -- This register selects which address
 * bits map to the various column select bits. These registers interleave, so
 * in the case of DDR4, it's 0x50, 0x54 for DIMM 0 lo, hi; then 0x58, 0x5c for
 * DIMM 1. DDR5 based entries do something similar; however, instead of being
 * per-DIMM, there is one of these for each CS.
 */
/*CSTYLED*/
#define	D_UMC_COLSEL_LO_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x50,	\
	.srd_nents = 2,	\
	.srd_stride = 8	\
}
/*CSTYLED*/
#define	D_UMC_COLSEL_HI_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x54,	\
	.srd_nents = 2,	\
	.srd_stride = 8	\
}
/*CSTYLED*/
#define	D_UMC_COLSEL_LO_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x60,	\
	.srd_nents = 4,	\
	.srd_stride = 8	\
}
/*CSTYLED*/
#define	D_UMC_COLSEL_HI_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x64,	\
	.srd_nents = 4,	\
	.srd_stride = 8	\
}
#define	UMC_COLSEL_LO_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_COLSEL_LO_DDR4, i)
#define	UMC_COLSEL_HI_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_COLSEL_HI_DDR4, i)
#define	UMC_COLSEL_LO_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_COLSEL_LO_DDR5, i)
#define	UMC_COLSEL_HI_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_COLSEL_HI_DDR5, i)

#define	UMC_COLSEL_REMAP_GET_COL(r, x)	bitx32(r, (3 + (4 * (x))), (4 * ((x))))
#define	UMC_COLSEL_LO_BASE		2
#define	UMC_COLSEL_HI_BASE		8

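/*
 * As an illustrative sketch (not taken from the PPR): each register holds
 * eight 4-bit fields, one per column bit, so the normalized address bit used
 * for column bit 3 would be UMC_COLSEL_REMAP_GET_COL(lo, 3) +
 * UMC_COLSEL_LO_BASE, while higher column bits would come from the "Hi"
 * register and use UMC_COLSEL_HI_BASE instead.
 */
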
/*
 * UMC::CH::RmSel -- This register contains the bits that determine how the
 * rank is selected. Which fields of this register are valid varies quite a bit
 * across parts. The DDR4 and DDR5 versions are different enough that we use
 * totally disjoint definitions. It's also worth noting that DDR5 doesn't have
 * a secondary version of this, as it is included in the main register.
 *
 * In general, APUs have some of the MSBS (most significant bit swap) related
 * fields; however, they do not have rank multiplication bits.
 */
/*CSTYLED*/
#define	D_UMC_RMSEL_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x70,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_RMSEL_SEC_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x78,	\
	.srd_nents = 2	\
}
#define	UMC_RMSEL_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_RMSEL_DDR4, i)
#define	UMC_RMSEL_SEC_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_RMSEL_SEC_DDR4, i)
#define	UMC_RMSEL_DDR4_GET_INV_MSBO(r)	bitx32(r, 19, 18)
#define	UMC_RMSEL_DDR4_GET_INV_MSBE(r)	bitx32(r, 17, 16)
#define	UMC_RMSEL_DDR4_GET_RM2(r)	bitx32(r, 11, 8)
#define	UMC_RMSEL_DDR4_GET_RM1(r)	bitx32(r, 7, 4)
#define	UMC_RMSEL_DDR4_GET_RM0(r)	bitx32(r, 3, 0)
#define	UMC_RMSEL_BASE			12

/*CSTYLED*/
#define	D_UMC_RMSEL_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x80,	\
	.srd_nents = 4	\
}
#define	UMC_RMSEL_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_RMSEL_DDR5, i)
#define	UMC_RMSEL_DDR5_GET_INV_MSBS_SEC(r)	bitx32(r, 31, 30)
#define	UMC_RMSEL_DDR5_GET_INV_MSBS(r)		bitx32(r, 29, 28)
#define	UMC_RMSEL_DDR5_GET_SUBCHAN(r)	bitx32(r, 19, 16)
#define	UMC_RMSEL_DDR5_SUBCHAN_BASE	5
#define	UMC_RMSEL_DDR5_GET_RM3(r)	bitx32(r, 15, 12)
#define	UMC_RMSEL_DDR5_GET_RM2(r)	bitx32(r, 11, 8)
#define	UMC_RMSEL_DDR5_GET_RM1(r)	bitx32(r, 7, 4)
#define	UMC_RMSEL_DDR5_GET_RM0(r)	bitx32(r, 3, 0)

/*
 * UMC::CH::DimmCfg -- This describes several properties of the DIMM that is
 * installed, such as its overall width or type.
 */
/*CSTYLED*/
#define	D_UMC_DIMMCFG_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x80,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_DIMMCFG_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x90,	\
	.srd_nents = 2	\
}
#define	UMC_DIMMCFG_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_DIMMCFG_DDR4, i)
#define	UMC_DIMMCFG_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_DIMMCFG_DDR5, i)
#define	UMC_DIMMCFG_GET_PKG_RALIGN(r)	bitx32(r, 10, 10)
#define	UMC_DIMMCFG_GET_REFRESH_DIS(r)	bitx32(r, 9, 9)
#define	UMC_DIMMCFG_GET_DQ_SWAP_DIS(r)	bitx32(r, 8, 8)
#define	UMC_DIMMCFG_GET_X16(r)		bitx32(r, 7, 7)
#define	UMC_DIMMCFG_GET_X4(r)		bitx32(r, 6, 6)
#define	UMC_DIMMCFG_GET_LRDIMM(r)	bitx32(r, 5, 5)
#define	UMC_DIMMCFG_GET_RDIMM(r)	bitx32(r, 4, 4)
#define	UMC_DIMMCFG_GET_CISCS(r)	bitx32(r, 3, 3)
#define	UMC_DIMMCFG_GET_3DS(r)		bitx32(r, 2, 2)

#define	UMC_DIMMCFG_DDR4_GET_NVDIMMP(r)	bitx32(r, 12, 12)
#define	UMC_DIMMCFG_DDR4_GET_DDR4e(r)	bitx32(r, 11, 11)
#define	UMC_DIMMCFG_DDR5_GET_RALIGN(r)	bitx32(r, 13, 12)
#define	UMC_DIMMCFG_DDR5_GET_ASYM(r)	bitx32(r, 11, 11)

#define	UMC_DIMMCFG_DDR4_GET_OUTPUT_INV(r)	bitx32(r, 1, 1)
#define	UMC_DIMMCFG_DDR4_GET_MRS_MIRROR(r)	bitx32(r, 0, 0)

/*
 * UMC::CH::AddrHashBank -- These registers contain various instructions about
 * how to hash the address across the bank bits to influence which bank is
 * used. A sketch of how such a hash is applied follows the field definitions
 * below.
 */
/*CSTYLED*/
#define	D_UMC_BANK_HASH_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xc8,	\
	.srd_nents = 5	\
}
/*CSTYLED*/
#define	D_UMC_BANK_HASH_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x98,	\
	.srd_nents = 5	\
}
#define	UMC_BANK_HASH_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_BANK_HASH_DDR4, i)
#define	UMC_BANK_HASH_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_BANK_HASH_DDR5, i)
#define	UMC_BANK_HASH_GET_ROW(r)	bitx32(r, 31, 14)
#define	UMC_BANK_HASH_GET_COL(r)	bitx32(r, 13, 1)
#define	UMC_BANK_HASH_GET_EN(r)		bitx32(r, 0, 0)

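/*
 * One plausible way such a hash gets applied during decoding (an illustration
 * only, not the PPR's definition; 'row' and 'col' are the decoded row and
 * column, 'hash' is instance i of this register, and parity() stands in for a
 * hypothetical even-parity helper):
 *
 *	if (UMC_BANK_HASH_GET_EN(hash)) {
 *		bank_bit[i] ^= parity(row & UMC_BANK_HASH_GET_ROW(hash)) ^
 *		    parity(col & UMC_BANK_HASH_GET_COL(hash));
 *	}
 */
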
/*
 * UMC::CH::AddrHashRM -- This hash register describes how to transform a UMC
 * address when trying to do rank hashing. Note, instance 3 is reserved in DDR5
 * modes.
 */
/*CSTYLED*/
#define	D_UMC_RANK_HASH_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xdc,	\
	.srd_nents = 3	\
}
/*CSTYLED*/
#define	D_UMC_RANK_HASH_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb0,	\
	.srd_nents = 4	\
}
#define	UMC_RANK_HASH_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_RANK_HASH_DDR4, i)
#define	UMC_RANK_HASH_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_RANK_HASH_DDR5, i)
#define	UMC_RANK_HASH_GET_ADDR(r)	bitx32(r, 31, 1)
#define	UMC_RANK_HASH_SHIFT		9
#define	UMC_RANK_HASH_GET_EN(r)		bitx32(r, 0, 0)

/*
 * UMC::AddrHashRMExt -- Extended rank hash addresses.
 */
/*CSTYLED*/
#define	D_UMC_RANK_HASH_EXT_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xbb0,	\
	.srd_nents = 4	\
}
#define	UMC_RANK_HASH_EXT_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_RANK_HASH_EXT_DDR5, i)
#define	UMC_RANK_HASH_EXT_GET_ADDR(r)	bitx32(r, 7, 0)
#define	UMC_RANK_HASH_EXT_ADDR_SHIFT	40

/*
 * UMC::CH::AddrHashPC, UMC::CH::AddrHashPC2 -- These registers describe a hash
 * to use for the DDR5 sub-channel. Note, in the DDR4 case this is actually the
 * upper two rank hash registers defined above, because the systems where this
 * occurs for DDR4 only have up to one rank hash.
 */
/*CSTYLED*/
#define	D_UMC_PC_HASH_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xc0	\
}
/*CSTYLED*/
#define	D_UMC_PC_HASH2_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xc4	\
}
#define	UMC_PC_HASH_DDR4(u)	UMC_RANK_HASH_DDR4(u, 1)
#define	UMC_PC_HASH2_DDR4(u)	UMC_RANK_HASH_DDR4(u, 2)
#define	UMC_PC_HASH_DDR5(u)	amdzen_umc_smn_reg(u, D_UMC_PC_HASH_DDR5, 0)
#define	UMC_PC_HASH2_DDR5(u)	amdzen_umc_smn_reg(u, D_UMC_PC_HASH2_DDR5, 0)
#define	UMC_PC_HASH_GET_ROW(r)		bitx32(r, 31, 14)
#define	UMC_PC_HASH_GET_COL(r)		bitx32(r, 13, 1)
#define	UMC_PC_HASH_GET_EN(r)		bitx32(r, 0, 0)
#define	UMC_PC_HASH2_GET_BANK(r)	bitx32(r, 4, 0)

/*
 * UMC::CH::AddrHashCS -- Hashing: chip-select edition. Note, these can
 * ultimately cause you to change which DIMM is actually being accessed.
 */
/*CSTYLED*/
#define	D_UMC_CS_HASH_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xe8,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_CS_HASH_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xc8,	\
	.srd_nents = 2	\
}
#define	UMC_CS_HASH_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_CS_HASH_DDR4, i)
#define	UMC_CS_HASH_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_CS_HASH_DDR5, i)
#define	UMC_CS_HASH_GET_ADDR(r)		bitx32(r, 31, 1)
#define	UMC_CS_HASH_SHIFT		9
#define	UMC_CS_HASH_GET_EN(r)		bitx32(r, 0, 0)

/*
 * UMC::AddrHashExtCS -- Extended chip-select hash addresses.
 */
/*CSTYLED*/
#define	D_UMC_CS_HASH_EXT_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xbc8,	\
	.srd_nents = 2	\
}
#define	UMC_CS_HASH_EXT_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_CS_HASH_EXT_DDR5, i)
#define	UMC_CS_HASH_EXT_GET_ADDR(r)	bitx32(r, 7, 0)
#define	UMC_CS_HASH_EXT_ADDR_SHIFT	40

/*
 * UMC::CH::UmcConfig -- This register controls various features of the device.
 * For our purposes, we mostly care about seeing whether ECC is enabled and what
 * the DDR type is.
 */
/*CSTYLED*/
#define	D_UMC_UMCCFG	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x100	\
}
#define	UMC_UMCCFG(u)	amdzen_umc_smn_reg(u, D_UMC_UMCCFG, 0)
#define	UMC_UMCCFG_GET_READY(r)		bitx32(r, 31, 31)
#define	UMC_UMCCFG_GET_ECC_EN(r)	bitx32(r, 12, 12)
#define	UMC_UMCCFG_GET_BURST_CTL(r)	bitx32(r, 11, 10)
#define	UMC_UMCCFG_GET_BURST_LEN(r)	bitx32(r, 9, 8)
#define	UMC_UMCCFG_GET_DDR_TYPE(r)	bitx32(r, 2, 0)
#define	UMC_UMCCFG_DDR4_T_DDR4		0
#define	UMC_UMCCFG_DDR4_T_LPDDR4	5

#define	UMC_UMCCFG_DDR5_T_DDR4		0
#define	UMC_UMCCFG_DDR5_T_DDR5		1
#define	UMC_UMCCFG_DDR5_T_LPDDR4	5
#define	UMC_UMCCFG_DDR5_T_LPDDR5	6

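/*
 * For example (assuming 'val' is a raw UMC::CH::UmcConfig value read from a
 * DDR5 capable UMC), one might check for a DDR5 DIMM with ECC enabled as:
 *
 *	if (UMC_UMCCFG_GET_DDR_TYPE(val) == UMC_UMCCFG_DDR5_T_DDR5 &&
 *	    UMC_UMCCFG_GET_ECC_EN(val) != 0)
 */
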
/*
 * UMC::CH::DataCtrl -- Various settings around whether data encryption or
 * scrambling is enabled. Note, this register really changes a bunch from family
 * to family.
 */
/*CSTYLED*/
#define	D_UMC_DATACTL	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x144	\
}
#define	UMC_DATACTL(u)		amdzen_umc_smn_reg(u, D_UMC_DATACTL, 0)
#define	UMC_DATACTL_GET_ENCR_EN(r)	bitx32(r, 8, 8)
#define	UMC_DATACTL_GET_SCRAM_EN(r)	bitx32(r, 0, 0)

#define	UMC_DATACTL_DDR4_GET_TWEAK(r)		bitx32(r, 19, 16)
#define	UMC_DATACTL_DDR4_GET_VMG2M(r)		bitx32(r, 12, 12)
#define	UMC_DATACTL_DDR4_GET_FORCE_ENCR(r)	bitx32(r, 11, 11)

#define	UMC_DATACTL_DDR5_GET_TWEAK(r)	bitx32(r, 16, 16)
#define	UMC_DATACTL_DDR5_GET_XTS(r)	bitx32(r, 14, 14)
#define	UMC_DATACTL_DDR5_GET_AES256(r)	bitx32(r, 13, 13)

/*
 * UMC::CH::EccCtrl -- Various settings around how ECC operates.
 */
/*CSTYLED*/
#define	D_UMC_ECCCTL	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x14c	\
}
#define	UMC_ECCCTL(u)	amdzen_umc_smn_reg(u, D_UMC_ECCCTL, 0)
#define	UMC_ECCCTL_GET_RD_EN(r)		bitx32(r, 10, 10)
#define	UMC_ECCCTL_GET_X16(r)		bitx32(r, 9, 9)
#define	UMC_ECCCTL_GET_UC_FATAL(r)	bitx32(r, 8, 8)
#define	UMC_ECCCTL_GET_SYM_SIZE(r)	bitx32(r, 7, 7)
#define	UMC_ECCCTL_GET_BIT_IL(r)	bitx32(r, 6, 6)
#define	UMC_ECCCTL_GET_HIST_EN(r)	bitx32(r, 5, 5)
#define	UMC_ECCCTL_GET_SW_SYM_EN(r)	bitx32(r, 4, 4)
#define	UMC_ECCCTL_GET_WR_EN(r)		bitx32(r, 0, 0)

/*
 * Note, while this group appears generic and is the same in both DDR4/DDR5
 * systems, this is not always present on every SoC and seems to depend on
 * something else inside the chip.
 */
#define	UMC_ECCCTL_DDR_GET_PI(r)	bitx32(r, 13, 13)
#define	UMC_ECCCTL_DDR_GET_PF_DIS(r)	bitx32(r, 12, 12)
#define	UMC_ECCCTL_DDR_GET_SDP_OVR(r)	bitx32(r, 11, 11)
#define	UMC_ECCCTL_DDR_GET_REPLAY_EN(r)	bitx32(r, 1, 1)

#define	UMC_ECCCTL_DDR5_GET_PIN_RED(r)	bitx32(r, 14, 14)

/*
 * UMC::CH::UmcCap, UMC::CH::UmcCapHi -- Various capability registers and
 * feature disables. We mostly just record these for our future selves, for
 * debugging purposes. They aren't used as part of memory decoding.
 */
/*CSTYLED*/
#define	D_UMC_UMCCAP	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xdf0	\
}
/*CSTYLED*/
#define	D_UMC_UMCCAP_HI	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xdf4	\
}
#define	UMC_UMCCAP(u)		amdzen_umc_smn_reg(u, D_UMC_UMCCAP, 0)
#define	UMC_UMCCAP_HI(u)	amdzen_umc_smn_reg(u, D_UMC_UMCCAP_HI, 0)

#ifdef __cplusplus
}
#endif

#endif /* _SYS_UMC_H */