// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Address Translation Library
 *
 * core.c : Module init and base translation functions
 *
 * Copyright (c) 2023, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
 */

#include <linux/module.h>
#include <asm/cpu_device_id.h>

#include "internal.h"

struct df_config df_cfg __read_mostly;

static int addr_over_limit(struct addr_ctx *ctx)
{
	u64 dram_limit_addr;

	if (df_cfg.rev >= DF4)
		dram_limit_addr = FIELD_GET(DF4_DRAM_LIMIT_ADDR, ctx->map.limit);
	else
		dram_limit_addr = FIELD_GET(DF2_DRAM_LIMIT_ADDR, ctx->map.limit);

	dram_limit_addr <<= DF_DRAM_BASE_LIMIT_LSB;
	dram_limit_addr |= GENMASK(DF_DRAM_BASE_LIMIT_LSB - 1, 0);

	/* Is calculated system address above DRAM limit address? */
	if (ctx->ret_addr > dram_limit_addr) {
		atl_debug(ctx, "Calculated address (0x%016llx) > DRAM limit (0x%016llx)",
			  ctx->ret_addr, dram_limit_addr);
		return -EINVAL;
	}

	return 0;
}

static bool legacy_hole_en(struct addr_ctx *ctx)
{
	u32 reg = ctx->map.base;

	if (df_cfg.rev >= DF4)
		reg = ctx->map.ctl;

	return FIELD_GET(DF_LEGACY_MMIO_HOLE_EN, reg);
}

static int add_legacy_hole(struct addr_ctx *ctx)
{
	if (!legacy_hole_en(ctx))
		return 0;

	if (ctx->ret_addr >= df_cfg.dram_hole_base)
		ctx->ret_addr += (BIT_ULL(32) - df_cfg.dram_hole_base);

	return 0;
}

static u64 get_base_addr(struct addr_ctx *ctx)
{
	u64 base_addr;

	if (df_cfg.rev >= DF4)
		base_addr = FIELD_GET(DF4_BASE_ADDR, ctx->map.base);
	else
		base_addr = FIELD_GET(DF2_BASE_ADDR, ctx->map.base);

	return base_addr << DF_DRAM_BASE_LIMIT_LSB;
}

static int add_base_and_hole(struct addr_ctx *ctx)
{
	ctx->ret_addr += get_base_addr(ctx);

	if (add_legacy_hole(ctx))
		return -EINVAL;

	return 0;
}

static bool late_hole_remove(struct addr_ctx *ctx)
{
	if (df_cfg.rev == DF3p5)
		return true;

	if (df_cfg.rev == DF4)
		return true;

	if (ctx->map.intlv_mode == DF3_6CHAN)
		return true;

	return false;
}

unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr)
{
	struct addr_ctx ctx;

	if (df_cfg.rev == UNKNOWN)
		return -EINVAL;

	memset(&ctx, 0, sizeof(ctx));

	/* Start from the normalized address */
	ctx.ret_addr = addr;
	ctx.inst_id = coh_st_inst_id;

	ctx.inputs.norm_addr = addr;
	ctx.inputs.socket_id = socket_id;
	ctx.inputs.die_id = die_id;
	ctx.inputs.coh_st_inst_id = coh_st_inst_id;

	if (legacy_hole_en(&ctx) && !df_cfg.dram_hole_base)
		return -EINVAL;

	if (determine_node_id(&ctx, socket_id, die_id))
		return -EINVAL;

	if (get_address_map(&ctx))
		return -EINVAL;

	if (denormalize_address(&ctx))
		return -EINVAL;

	if (!late_hole_remove(&ctx) && add_base_and_hole(&ctx))
		return -EINVAL;

	if (dehash_address(&ctx))
		return -EINVAL;

	if (late_hole_remove(&ctx) && add_base_and_hole(&ctx))
		return -EINVAL;

	if (addr_over_limit(&ctx))
		return -EINVAL;

	return ctx.ret_addr;
}

static void check_for_legacy_df_access(void)
{
	/*
	 * All Zen-based systems before Family 19h use the legacy
	 * DF Indirect Access (FICAA/FICAD) offsets.
	 */
	if (boot_cpu_data.x86 < 0x19) {
		df_cfg.flags.legacy_ficaa = true;
		return;
	}

	/* All systems after Family 19h use the current offsets. */
	if (boot_cpu_data.x86 > 0x19)
		return;

	/* Some Family 19h systems use the legacy offsets. */
	switch (boot_cpu_data.x86_model) {
	case 0x00 ... 0x0f:
	case 0x20 ... 0x5f:
		df_cfg.flags.legacy_ficaa = true;
	}
}

/*
 * This library provides functionality for AMD-based systems with a Data Fabric.
 * The set of systems with a Data Fabric is equivalent to the set of Zen-based systems
 * and the set of systems with the Scalable MCA feature at this time. However, these
 * are technically independent things.
 *
 * It's possible to match on the PCI IDs of the Data Fabric devices, but this will be
 * an ever expanding list. Instead, match on the SMCA and Zen features to cover all
 * relevant systems.
 */
static const struct x86_cpu_id amd_atl_cpuids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SMCA, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_ZEN, NULL),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd_atl_cpuids);

static int __init amd_atl_init(void)
{
	if (!x86_match_cpu(amd_atl_cpuids))
		return -ENODEV;

	if (!amd_nb_num())
		return -ENODEV;

	check_for_legacy_df_access();

	if (get_df_system_info())
		return -ENODEV;

	/* Increment this module's refcount so that it can't be easily unloaded. */
	__module_get(THIS_MODULE);
	amd_atl_register_decoder(convert_umc_mca_addr_to_sys_addr);

	pr_info("AMD Address Translation Library initialized");
	return 0;
}

/*
 * Exit function is only needed for testing and debug. Module unload must be
 * forced to override refcount check.
 */
static void __exit amd_atl_exit(void)
{
	amd_atl_unregister_decoder();
}

module_init(amd_atl_init);
module_exit(amd_atl_exit);

MODULE_DESCRIPTION("AMD Address Translation Library");
MODULE_LICENSE("GPL");