// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Address Translation Library
 *
 * core.c : Module init and base translation functions
 *
 * Copyright (c) 2023, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
 */

#include <linux/module.h>
#include <asm/cpu_device_id.h>

#include "internal.h"

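/* Cached Data Fabric configuration, populated during module init (see get_df_system_info()). */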
struct df_config df_cfg __read_mostly;

static int addr_over_limit(struct addr_ctx *ctx)
{
	u64 dram_limit_addr;

	if (df_cfg.rev >= DF4)
		dram_limit_addr = FIELD_GET(DF4_DRAM_LIMIT_ADDR, ctx->map.limit);
	else
		dram_limit_addr = FIELD_GET(DF2_DRAM_LIMIT_ADDR, ctx->map.limit);

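	/*
	 * The map registers hold only the upper bits of the limit. Shift
	 * them into place and fill the low bits with 1s so the limit is
	 * inclusive. For example, assuming an LSB of bit 28, a raw field
	 * value of 0x4 yields a limit of 0x4FFFFFFF.
	 */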
	dram_limit_addr <<= DF_DRAM_BASE_LIMIT_LSB;
	dram_limit_addr |= GENMASK(DF_DRAM_BASE_LIMIT_LSB - 1, 0);

	/* Is calculated system address above DRAM limit address? */
	if (ctx->ret_addr > dram_limit_addr) {
		atl_debug(ctx, "Calculated address (0x%016llx) > DRAM limit (0x%016llx)",
			  ctx->ret_addr, dram_limit_addr);
		return -EINVAL;
	}

	return 0;
}

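/*
 * The legacy MMIO hole enable bit lives in the base register before DF4
 * and in the control register on DF4 and later.
 */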
static bool legacy_hole_en(struct addr_ctx *ctx)
{
	u32 reg = ctx->map.base;

	if (df_cfg.rev >= DF4)
		reg = ctx->map.ctl;

	return FIELD_GET(DF_LEGACY_MMIO_HOLE_EN, reg);
}

static int add_legacy_hole(struct addr_ctx *ctx)
{
	u32 dram_hole_base;
	u8 func = 0;

	if (!legacy_hole_en(ctx))
		return 0;

	if (df_cfg.rev >= DF4)
		func = 7;

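	/*
	 * Read the DRAM hole base from the fabric's hole control register:
	 * offset 0x104 in function 7 on DF4 and later, function 0 otherwise.
	 */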
	if (df_indirect_read_broadcast(ctx->node_id, func, 0x104, &dram_hole_base))
		return -EINVAL;

	dram_hole_base &= DF_DRAM_HOLE_BASE_MASK;

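	/*
	 * Addresses at or above the hole base are shifted up by the hole
	 * size. For example, a hole base of 0xC0000000 (3 GB) bumps such
	 * addresses up by BIT_ULL(32) - 0xC0000000 = 0x40000000 (1 GB).
	 */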
	if (ctx->ret_addr >= dram_hole_base)
		ctx->ret_addr += (BIT_ULL(32) - dram_hole_base);

	return 0;
}

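/* Like the limit, the DRAM base is held in the upper bits of its register. */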
static u64 get_base_addr(struct addr_ctx *ctx)
{
	u64 base_addr;

	if (df_cfg.rev >= DF4)
		base_addr = FIELD_GET(DF4_BASE_ADDR, ctx->map.base);
	else
		base_addr = FIELD_GET(DF2_BASE_ADDR, ctx->map.base);

	return base_addr << DF_DRAM_BASE_LIMIT_LSB;
}

static int add_base_and_hole(struct addr_ctx *ctx)
{
	ctx->ret_addr += get_base_addr(ctx);

	if (add_legacy_hole(ctx))
		return -EINVAL;

	return 0;
}

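/*
 * Some fabric revisions and interleaving modes expect the DRAM base and
 * legacy hole to be applied after dehashing rather than before it.
 */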
static bool late_hole_remove(struct addr_ctx *ctx)
{
	if (df_cfg.rev == DF3p5)
		return true;

	if (df_cfg.rev == DF4)
		return true;

	if (ctx->map.intlv_mode == DF3_6CHAN)
		return true;

	return false;
}

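/*
 * Translate a normalized (DRAM-relative) address reported by a memory
 * controller back into a system physical address: determine the node,
 * fetch its DRAM address map, denormalize, dehash, and add the DRAM base
 * and legacy hole (before or after dehashing, depending on the fabric
 * revision), then sanity-check the result against the DRAM limit.
 *
 * A minimal usage sketch with hypothetical values, e.g. a memory
 * controller on socket 0, die 0, coherent station instance 1 reporting
 * normalized address 0x12345680:
 *
 *	sys_addr = norm_to_sys_addr(0, 0, 1, 0x12345680);
 *
 * On failure, a negative errno value cast to unsigned long is returned.
 */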
unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr)
{
	struct addr_ctx ctx;

	if (df_cfg.rev == UNKNOWN)
		return -EINVAL;

	memset(&ctx, 0, sizeof(ctx));

	/* Start from the normalized address */
	ctx.ret_addr = addr;
	ctx.inst_id = coh_st_inst_id;

	ctx.inputs.norm_addr = addr;
	ctx.inputs.socket_id = socket_id;
	ctx.inputs.die_id = die_id;
	ctx.inputs.coh_st_inst_id = coh_st_inst_id;

	if (determine_node_id(&ctx, socket_id, die_id))
		return -EINVAL;

	if (get_address_map(&ctx))
		return -EINVAL;

	if (denormalize_address(&ctx))
		return -EINVAL;

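	/* Apply the DRAM base and legacy hole either before or after dehashing. */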
	if (!late_hole_remove(&ctx) && add_base_and_hole(&ctx))
		return -EINVAL;

	if (dehash_address(&ctx))
		return -EINVAL;

	if (late_hole_remove(&ctx) && add_base_and_hole(&ctx))
		return -EINVAL;

	if (addr_over_limit(&ctx))
		return -EINVAL;

	return ctx.ret_addr;
}

static void check_for_legacy_df_access(void)
{
	/*
	 * All Zen-based systems before Family 19h use the legacy
	 * DF Indirect Access (FICAA/FICAD) offsets.
	 */
	if (boot_cpu_data.x86 < 0x19) {
		df_cfg.flags.legacy_ficaa = true;
		return;
	}

	/* All systems after Family 19h use the current offsets. */
	if (boot_cpu_data.x86 > 0x19)
		return;

	/* Some Family 19h systems use the legacy offsets. */
	switch (boot_cpu_data.x86_model) {
	case 0x00 ... 0x0f:
	case 0x20 ... 0x5f:
		df_cfg.flags.legacy_ficaa = true;
	}
}

/*
 * This library provides functionality for AMD-based systems with a Data Fabric.
 * At this time, the set of systems with a Data Fabric matches the set of
 * Zen-based systems and the set of systems with the Scalable MCA feature.
 * However, these are technically independent things.
 *
 * It's possible to match on the PCI IDs of the Data Fabric devices, but this
 * would be an ever-expanding list. Instead, match on the SMCA and Zen features
 * to cover all relevant systems.
 */
static const struct x86_cpu_id amd_atl_cpuids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SMCA, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_ZEN, NULL),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd_atl_cpuids);

static int __init amd_atl_init(void)
{
	if (!x86_match_cpu(amd_atl_cpuids))
		return -ENODEV;

	if (!amd_nb_num())
		return -ENODEV;

	check_for_legacy_df_access();

	if (get_df_system_info())
		return -ENODEV;

	/* Increment this module's refcount so that it can't be easily unloaded. */
	__module_get(THIS_MODULE);
	amd_atl_register_decoder(convert_umc_mca_addr_to_sys_addr);

	pr_info("AMD Address Translation Library initialized\n");
	return 0;
}

/*
 * The exit function is only needed for testing and debug. Module unload
 * must be forced to override the refcount check.
 */
static void __exit amd_atl_exit(void)
{
	amd_atl_unregister_decoder();
}

module_init(amd_atl_init);
module_exit(amd_atl_exit);

MODULE_LICENSE("GPL");
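MODULE_DESCRIPTION("AMD Address Translation Library");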