// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Address Translation Library
 *
 * map.c : Functions to read and decode DRAM address maps
 *
 * Copyright (c) 2023, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
 */

#include "internal.h"

static int df2_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF2_INTLV_NUM_CHAN, ctx->map.base);

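	/*
	 * The register reports the hashed 2-channel mode as a raw value of 8.
	 * Translate it to the library's unique DF2_2CHAN_HASH value.
	 */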
	if (ctx->map.intlv_mode == 8)
		ctx->map.intlv_mode = DF2_2CHAN_HASH;

	if (ctx->map.intlv_mode != NONE &&
	    ctx->map.intlv_mode != NOHASH_2CHAN &&
	    ctx->map.intlv_mode != DF2_2CHAN_HASH)
		return -EINVAL;

	return 0;
}

static int df3_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF3_INTLV_NUM_CHAN, ctx->map.base);
	return 0;
}

static int df3p5_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF3p5_INTLV_NUM_CHAN, ctx->map.base);

	if (ctx->map.intlv_mode == DF3_6CHAN)
		return -EINVAL;

	return 0;
}

static int df4_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF4_INTLV_NUM_CHAN, ctx->map.intlv);

	if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH ||
	    ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH ||
	    ctx->map.intlv_mode == DF3_COD1_8CHAN_HASH ||
	    ctx->map.intlv_mode == DF3_6CHAN)
		return -EINVAL;

	return 0;
}

static int df4p5_get_intlv_mode(struct addr_ctx *ctx)
{
	ctx->map.intlv_mode = FIELD_GET(DF4p5_INTLV_NUM_CHAN, ctx->map.intlv);

	if (ctx->map.intlv_mode <= NOHASH_32CHAN)
		return 0;

	if (ctx->map.intlv_mode >= MI3_HASH_8CHAN &&
	    ctx->map.intlv_mode <= MI3_HASH_32CHAN)
		return 0;

	/*
	 * Modes matching the ranges above are returned as-is.
	 *
	 * All other modes are "fixed up" by adding 20h to make a unique value.
	 */
	ctx->map.intlv_mode += 0x20;

	return 0;
}

static int get_intlv_mode(struct addr_ctx *ctx)
{
	int ret;

	switch (df_cfg.rev) {
	case DF2:
		ret = df2_get_intlv_mode(ctx);
		break;
	case DF3:
		ret = df3_get_intlv_mode(ctx);
		break;
	case DF3p5:
		ret = df3p5_get_intlv_mode(ctx);
		break;
	case DF4:
		ret = df4_get_intlv_mode(ctx);
		break;
	case DF4p5:
		ret = df4p5_get_intlv_mode(ctx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		atl_debug_on_bad_df_rev();

	return ret;
}

static u64 get_hi_addr_offset(u32 reg_dram_offset)
{
	u8 shift = DF_DRAM_BASE_LIMIT_LSB;
	u64 hi_addr_offset;

	switch (df_cfg.rev) {
	case DF2:
		hi_addr_offset = FIELD_GET(DF2_HI_ADDR_OFFSET, reg_dram_offset);
		break;
	case DF3:
	case DF3p5:
		hi_addr_offset = FIELD_GET(DF3_HI_ADDR_OFFSET, reg_dram_offset);
		break;
	case DF4:
	case DF4p5:
		hi_addr_offset = FIELD_GET(DF4_HI_ADDR_OFFSET, reg_dram_offset);
		break;
	default:
		hi_addr_offset = 0;
		atl_debug_on_bad_df_rev();
	}

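	/*
	 * MI300 (heterogeneous DF4.5) systems use a different DRAM base/limit
	 * granularity, so shift by MI300_DRAM_LIMIT_LSB instead.
	 */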
	if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
		shift = MI300_DRAM_LIMIT_LSB;

	return hi_addr_offset << shift;
}

/*
 * Returns:	0 if offset is disabled.
 *		1 if offset is enabled.
 *		-EINVAL on error.
 */
static int get_dram_offset(struct addr_ctx *ctx, u64 *norm_offset)
{
	u32 reg_dram_offset;
	u8 map_num;

	/* Should not be called for map 0. */
	if (!ctx->map.num) {
		atl_debug(ctx, "Trying to find DRAM offset for map 0");
		return -EINVAL;
	}

	/*
	 * DramOffset registers don't exist for map 0, so the base register
	 * actually refers to map 1.
	 * Adjust the map_num for the register offsets.
	 */
	map_num = ctx->map.num - 1;

	if (df_cfg.rev >= DF4) {
		/* Read D18F7x140 (DramOffset) */
		if (df_indirect_read_instance(ctx->node_id, 7, 0x140 + (4 * map_num),
					      ctx->inst_id, &reg_dram_offset))
			return -EINVAL;

	} else {
		/* Read D18F0x1B4 (DramOffset) */
		if (df_indirect_read_instance(ctx->node_id, 0, 0x1B4 + (4 * map_num),
					      ctx->inst_id, &reg_dram_offset))
			return -EINVAL;
	}

	if (!FIELD_GET(DF_HI_ADDR_OFFSET_EN, reg_dram_offset))
		return 0;

	*norm_offset = get_hi_addr_offset(reg_dram_offset);

	return 1;
}

static int df3_6ch_get_dram_addr_map(struct addr_ctx *ctx)
{
	u16 dst_fabric_id = FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit);
	u8 i, j, shift = 4, mask = 0xF;
	u32 reg, offset = 0x60;
	u16 dst_node_id;

	/* Get Socket 1 register. */
	if (dst_fabric_id & df_cfg.socket_id_mask)
		offset = 0x68;

	/* Read D18F0x06{0,8} (DF::Skt0CsTargetRemap0)/(DF::Skt0CsTargetRemap1) */
	if (df_indirect_read_broadcast(ctx->node_id, 0, offset, &reg))
		return -EINVAL;

	/* Save 8 remap entries. */
	for (i = 0, j = 0; i < 8; i++, j++)
		ctx->map.remap_array[i] = (reg >> (j * shift)) & mask;

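	/* Np2ChannelConfig is read from the map's destination node. */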
	dst_node_id = dst_fabric_id & df_cfg.node_id_mask;
	dst_node_id >>= df_cfg.node_id_shift;

	/* Read D18F2x090 (DF::Np2ChannelConfig) */
	if (df_indirect_read_broadcast(dst_node_id, 2, 0x90, &reg))
		return -EINVAL;

	ctx->map.np2_bits = FIELD_GET(DF_LOG2_ADDR_64K_SPACE0, reg);
	return 0;
}

static int df2_get_dram_addr_map(struct addr_ctx *ctx)
{
	/* Read D18F0x110 (DramBaseAddress). */
	if (df_indirect_read_instance(ctx->node_id, 0, 0x110 + (8 * ctx->map.num),
				      ctx->inst_id, &ctx->map.base))
		return -EINVAL;

	/* Read D18F0x114 (DramLimitAddress). */
	if (df_indirect_read_instance(ctx->node_id, 0, 0x114 + (8 * ctx->map.num),
				      ctx->inst_id, &ctx->map.limit))
		return -EINVAL;

	return 0;
}

static int df3_get_dram_addr_map(struct addr_ctx *ctx)
{
	if (df2_get_dram_addr_map(ctx))
		return -EINVAL;

	/* Read D18F0x3F8 (DfGlobalCtl). */
	if (df_indirect_read_instance(ctx->node_id, 0, 0x3F8,
				      ctx->inst_id, &ctx->map.ctl))
		return -EINVAL;

	return 0;
}

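/*
 * Read the DF4 DRAM address map registers and, if remapping is enabled, the
 * CS target remap entries: two registers of eight 4-bit entries per remap set.
 */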
static int df4_get_dram_addr_map(struct addr_ctx *ctx)
{
	u8 remap_sel, i, j, shift = 4, mask = 0xF;
	u32 remap_reg;

	/* Read D18F7xE00 (DramBaseAddress). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0xE00 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.base))
		return -EINVAL;

	/* Read D18F7xE04 (DramLimitAddress). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0xE04 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.limit))
		return -EINVAL;

	/* Read D18F7xE08 (DramAddressCtl). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0xE08 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.ctl))
		return -EINVAL;

	/* Read D18F7xE0C (DramAddressIntlv). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0xE0C + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.intlv))
		return -EINVAL;

	/* Check whether the Remap Enable bit is set. */
	if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
		return 0;

	/* Fill with bogus values, because '0' is a valid value. */
	memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));

	/* Get Remap registers. */
	remap_sel = FIELD_GET(DF4_REMAP_SEL, ctx->map.ctl);

	/* Read D18F7x180 (CsTargetRemap0A). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (8 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save first 8 remap entries. */
	for (i = 0, j = 0; i < 8; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	/* Read D18F7x184 (CsTargetRemap0B). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (8 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save next 8 remap entries. */
	for (i = 8, j = 0; i < 16; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	return 0;
}

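/*
 * Read the DF4.5 DRAM address map registers and, if remapping is enabled, the
 * CS target remap entries: three registers of six 5-bit entries per remap set.
 */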
static int df4p5_get_dram_addr_map(struct addr_ctx *ctx)
{
	u8 remap_sel, i, j, shift = 5, mask = 0x1F;
	u32 remap_reg;

	/* Read D18F7x200 (DramBaseAddress). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x200 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.base))
		return -EINVAL;

	/* Read D18F7x204 (DramLimitAddress). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x204 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.limit))
		return -EINVAL;

	/* Read D18F7x208 (DramAddressCtl). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x208 + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.ctl))
		return -EINVAL;

	/* Read D18F7x20C (DramAddressIntlv). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x20C + (16 * ctx->map.num),
				      ctx->inst_id, &ctx->map.intlv))
		return -EINVAL;

	/* Check whether the Remap Enable bit is set. */
	if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
		return 0;

	/* Fill with bogus values, because '0' is a valid value. */
	memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));

	/* Get Remap registers. */
	remap_sel = FIELD_GET(DF4p5_REMAP_SEL, ctx->map.ctl);

	/* Read D18F7x180 (CsTargetRemap0A). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (24 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save first 6 remap entries. */
	for (i = 0, j = 0; i < 6; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	/* Read D18F7x184 (CsTargetRemap0B). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (24 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save next 6 remap entries. */
	for (i = 6, j = 0; i < 12; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	/* Read D18F7x188 (CsTargetRemap0C). */
	if (df_indirect_read_instance(ctx->node_id, 7, 0x188 + (24 * remap_sel),
				      ctx->inst_id, &remap_reg))
		return -EINVAL;

	/* Save next 6 remap entries. */
	for (i = 12, j = 0; i < 18; i++, j++)
		ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

	return 0;
}

static int get_dram_addr_map(struct addr_ctx *ctx)
{
	switch (df_cfg.rev) {
	case DF2:	return df2_get_dram_addr_map(ctx);
	case DF3:
	case DF3p5:	return df3_get_dram_addr_map(ctx);
	case DF4:	return df4_get_dram_addr_map(ctx);
	case DF4p5:	return df4p5_get_dram_addr_map(ctx);
	default:
			atl_debug_on_bad_df_rev();
			return -EINVAL;
	}
}

static int get_coh_st_fabric_id(struct addr_ctx *ctx)
{
	u32 reg;

	/*
	 * On MI300 systems, the Coherent Station Fabric ID is derived later
	 * and does not depend on the register value.
	 */
	if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
		return 0;

	/* Read D18F0x50 (FabricBlockInstanceInformation3). */
	if (df_indirect_read_instance(ctx->node_id, 0, 0x50, ctx->inst_id, &reg))
		return -EINVAL;

	if (df_cfg.rev < DF4p5)
		ctx->coh_st_fabric_id = FIELD_GET(DF2_COH_ST_FABRIC_ID, reg);
	else
		ctx->coh_st_fabric_id = FIELD_GET(DF4p5_COH_ST_FABRIC_ID, reg);

	return 0;
}

static int find_normalized_offset(struct addr_ctx *ctx, u64 *norm_offset)
{
	u64 last_offset = 0;
	int ret;

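	/* Walk the maps above map 0 looking for an enabled DRAM offset that applies to the calculated address. */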
	for (ctx->map.num = 1; ctx->map.num < df_cfg.num_coh_st_maps; ctx->map.num++) {
		ret = get_dram_offset(ctx, norm_offset);
		if (ret < 0)
			return ret;

		/* Continue search if this map's offset is not enabled. */
		if (!ret)
			continue;

		/* Enabled offsets should never be 0. */
		if (*norm_offset == 0) {
			atl_debug(ctx, "Enabled map %u offset is 0", ctx->map.num);
			return -EINVAL;
		}

		/* Offsets should always increase from one map to the next. */
		if (*norm_offset <= last_offset) {
			atl_debug(ctx, "Map %u offset (0x%016llx) <= previous (0x%016llx)",
				  ctx->map.num, *norm_offset, last_offset);
			return -EINVAL;
		}

		/* Match if the calculated address is at or above this map's offset. */
		if (ctx->ret_addr >= *norm_offset)
			break;

		last_offset = *norm_offset;
	}

	/*
	 * Finished search without finding a match.
	 * Reset to map 0 and no offset.
	 */
	if (ctx->map.num >= df_cfg.num_coh_st_maps) {
		ctx->map.num = 0;
		*norm_offset = 0;
	}

	return 0;
}

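/*
 * DF4 and later keep the address-range-valid bit in the DRAM control register;
 * earlier revisions keep it in the base register.
 */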
static bool valid_map(struct addr_ctx *ctx)
{
	if (df_cfg.rev >= DF4)
		return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.ctl);
	else
		return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.base);
}

static int get_address_map_common(struct addr_ctx *ctx)
{
	u64 norm_offset = 0;

	if (get_coh_st_fabric_id(ctx))
		return -EINVAL;

	if (find_normalized_offset(ctx, &norm_offset))
		return -EINVAL;

	if (get_dram_addr_map(ctx))
		return -EINVAL;

	if (!valid_map(ctx))
		return -EINVAL;

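	/* Remove this map's DRAM offset so later steps operate on an address relative to the map. */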
	ctx->ret_addr -= norm_offset;

	return 0;
}

static u8 get_num_intlv_chan(struct addr_ctx *ctx)
{
	switch (ctx->map.intlv_mode) {
	case NONE:
		return 1;
	case NOHASH_2CHAN:
	case DF2_2CHAN_HASH:
	case DF3_COD4_2CHAN_HASH:
	case DF4_NPS4_2CHAN_HASH:
	case DF4p5_NPS4_2CHAN_1K_HASH:
	case DF4p5_NPS4_2CHAN_2K_HASH:
		return 2;
	case DF4_NPS4_3CHAN_HASH:
	case DF4p5_NPS4_3CHAN_1K_HASH:
	case DF4p5_NPS4_3CHAN_2K_HASH:
		return 3;
	case NOHASH_4CHAN:
	case DF3_COD2_4CHAN_HASH:
	case DF4_NPS2_4CHAN_HASH:
	case DF4p5_NPS2_4CHAN_1K_HASH:
	case DF4p5_NPS2_4CHAN_2K_HASH:
		return 4;
	case DF4_NPS2_5CHAN_HASH:
	case DF4p5_NPS2_5CHAN_1K_HASH:
	case DF4p5_NPS2_5CHAN_2K_HASH:
		return 5;
	case DF3_6CHAN:
	case DF4_NPS2_6CHAN_HASH:
	case DF4p5_NPS2_6CHAN_1K_HASH:
	case DF4p5_NPS2_6CHAN_2K_HASH:
		return 6;
	case NOHASH_8CHAN:
	case DF3_COD1_8CHAN_HASH:
	case DF4_NPS1_8CHAN_HASH:
	case MI3_HASH_8CHAN:
	case DF4p5_NPS1_8CHAN_1K_HASH:
	case DF4p5_NPS1_8CHAN_2K_HASH:
		return 8;
	case DF4_NPS1_10CHAN_HASH:
	case DF4p5_NPS1_10CHAN_1K_HASH:
	case DF4p5_NPS1_10CHAN_2K_HASH:
		return 10;
	case DF4_NPS1_12CHAN_HASH:
	case DF4p5_NPS1_12CHAN_1K_HASH:
	case DF4p5_NPS1_12CHAN_2K_HASH:
		return 12;
	case NOHASH_16CHAN:
	case MI3_HASH_16CHAN:
	case DF4p5_NPS1_16CHAN_1K_HASH:
	case DF4p5_NPS1_16CHAN_2K_HASH:
		return 16;
	case DF4p5_NPS0_24CHAN_1K_HASH:
	case DF4p5_NPS0_24CHAN_2K_HASH:
		return 24;
	case NOHASH_32CHAN:
	case MI3_HASH_32CHAN:
		return 32;
	default:
		atl_debug_on_bad_intlv_mode(ctx);
		return 0;
	}
}

static void calculate_intlv_bits(struct addr_ctx *ctx)
{
	ctx->map.num_intlv_chan = get_num_intlv_chan(ctx);

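	/* The total number of interleave targets covers channels, dies, and sockets. */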
	ctx->map.total_intlv_chan = ctx->map.num_intlv_chan;
	ctx->map.total_intlv_chan *= ctx->map.num_intlv_dies;
	ctx->map.total_intlv_chan *= ctx->map.num_intlv_sockets;

	/*
	 * Get the number of bits needed to cover this many channels.
	 * order_base_2() rounds up automatically.
	 */
	ctx->map.total_intlv_bits = order_base_2(ctx->map.total_intlv_chan);
}

static u8 get_intlv_bit_pos(struct addr_ctx *ctx)
{
	u8 addr_sel = 0;

	switch (df_cfg.rev) {
	case DF2:
		addr_sel = FIELD_GET(DF2_INTLV_ADDR_SEL, ctx->map.base);
		break;
	case DF3:
	case DF3p5:
		addr_sel = FIELD_GET(DF3_INTLV_ADDR_SEL, ctx->map.base);
		break;
	case DF4:
	case DF4p5:
		addr_sel = FIELD_GET(DF4_INTLV_ADDR_SEL, ctx->map.intlv);
		break;
	default:
		atl_debug_on_bad_df_rev();
		break;
	}

	/* Add '8' to get the 'interleave bit position'. */
	return addr_sel + 8;
}

static u8 get_num_intlv_dies(struct addr_ctx *ctx)
{
	u8 dies = 0;

	switch (df_cfg.rev) {
	case DF2:
		dies = FIELD_GET(DF2_INTLV_NUM_DIES, ctx->map.limit);
		break;
	case DF3:
		dies = FIELD_GET(DF3_INTLV_NUM_DIES, ctx->map.base);
		break;
	case DF3p5:
		dies = FIELD_GET(DF3p5_INTLV_NUM_DIES, ctx->map.base);
		break;
	case DF4:
	case DF4p5:
		dies = FIELD_GET(DF4_INTLV_NUM_DIES, ctx->map.intlv);
		break;
	default:
		atl_debug_on_bad_df_rev();
		break;
	}

	/* Register value is log2, e.g. 0 -> 1 die, 1 -> 2 dies, etc. */
	return 1 << dies;
}

static u8 get_num_intlv_sockets(struct addr_ctx *ctx)
{
	u8 sockets = 0;

	switch (df_cfg.rev) {
	case DF2:
		sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.limit);
		break;
	case DF3:
	case DF3p5:
		sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.base);
		break;
	case DF4:
	case DF4p5:
		sockets = FIELD_GET(DF4_INTLV_NUM_SOCKETS, ctx->map.intlv);
		break;
	default:
		atl_debug_on_bad_df_rev();
		break;
	}

	/* Register value is log2, e.g. 0 -> 1 socket, 1 -> 2 sockets, etc. */
	return 1 << sockets;
}

static int get_global_map_data(struct addr_ctx *ctx)
{
	if (get_intlv_mode(ctx))
		return -EINVAL;

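	/* The DF3 6-channel mode needs extra remap and non-power-of-2 channel data. */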
	if (ctx->map.intlv_mode == DF3_6CHAN &&
	    df3_6ch_get_dram_addr_map(ctx))
		return -EINVAL;

	ctx->map.intlv_bit_pos		= get_intlv_bit_pos(ctx);
	ctx->map.num_intlv_dies		= get_num_intlv_dies(ctx);
	ctx->map.num_intlv_sockets	= get_num_intlv_sockets(ctx);
	calculate_intlv_bits(ctx);

	return 0;
}

static void dump_address_map(struct dram_addr_map *map)
{
	u8 i;

	pr_debug("intlv_mode=0x%x",		map->intlv_mode);
	pr_debug("num=0x%x",			map->num);
	pr_debug("base=0x%x",			map->base);
	pr_debug("limit=0x%x",			map->limit);
	pr_debug("ctl=0x%x",			map->ctl);
	pr_debug("intlv=0x%x",			map->intlv);

	for (i = 0; i < MAX_COH_ST_CHANNELS; i++)
		pr_debug("remap_array[%u]=0x%x", i, map->remap_array[i]);

	pr_debug("intlv_bit_pos=%u",		map->intlv_bit_pos);
	pr_debug("num_intlv_chan=%u",		map->num_intlv_chan);
	pr_debug("num_intlv_dies=%u",		map->num_intlv_dies);
	pr_debug("num_intlv_sockets=%u",	map->num_intlv_sockets);
	pr_debug("total_intlv_chan=%u",		map->total_intlv_chan);
	pr_debug("total_intlv_bits=%u",		map->total_intlv_bits);
}

int get_address_map(struct addr_ctx *ctx)
{
	int ret;

	ret = get_address_map_common(ctx);
	if (ret)
		return ret;

	ret = get_global_map_data(ctx);
	if (ret)
		return ret;

	dump_address_map(&ctx->map);

	return ret;
}