/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "xgmi/xgmi_6_1_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

#include "amdgpu_reset.h"

#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK 0x11a00218
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 0x12200218

#define XGMI_STATE_DISABLE 0xD1
#define XGMI_STATE_LS0 0x81
#define XGMI_LINK_ACTIVE 1
#define XGMI_LINK_INACTIVE 0

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4

static LIST_HEAD(xgmi_hive_list);

static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
	smnPCS_XGMI3X16_PCS_ERROR_STATUS,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
};

static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000
};

static const int walf_pcs_err_status_reg_aldebaran[] = {
	smnPCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
};

static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
	smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK,
	smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
};

static const int xgmi3x16_pcs_err_status_reg_v6_4[] = {
	smnPCS_XGMI3X16_PCS_ERROR_STATUS,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000
};

static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[] = {
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
};

static const u64 xgmi_v6_4_0_mca_base_array[] = {
	0x11a09200,
	0x11b09200,
};

static const char *xgmi_v6_4_0_ras_error_code_ext[32] = {
	[0x00] = "XGMI PCS DataLossErr",
	[0x01] = "XGMI PCS TrainingErr",
	[0x02] = "XGMI PCS FlowCtrlAckErr",
	[0x03] = "XGMI PCS RxFifoUnderflowErr",
	[0x04] = "XGMI PCS RxFifoOverflowErr",
	[0x05] = "XGMI PCS CRCErr",
	[0x06] = "XGMI PCS BERExceededErr",
	[0x07] = "XGMI PCS TxMetaDataErr",
	[0x08] = "XGMI PCS ReplayBufParityErr",
	[0x09] = "XGMI PCS DataParityErr",
	[0x0a] = "XGMI PCS ReplayFifoOverflowErr",
	[0x0b] = "XGMI PCS ReplayFifoUnderflowErr",
	[0x0c] = "XGMI PCS ElasticFifoOverflowErr",
	[0x0d] = "XGMI PCS DeskewErr",
	[0x0e] = "XGMI PCS FlowCtrlCRCErr",
	[0x0f] = "XGMI PCS DataStartupLimitErr",
	[0x10] = "XGMI PCS FCInitTimeoutErr",
	[0x11] = "XGMI PCS RecoveryTimeoutErr",
	[0x12] = "XGMI PCS ReadySerialTimeoutErr",
	[0x13] = "XGMI PCS ReadySerialAttemptErr",
	[0x14] = "XGMI PCS RecoveryAttemptErr",
	[0x15] = "XGMI PCS RecoveryRelockAttemptErr",
	[0x16] = "XGMI PCS ReplayAttemptErr",
	[0x17] = "XGMI PCS SyncHdrErr",
	[0x18] = "XGMI PCS TxReplayTimeoutErr",
	[0x19] = "XGMI PCS RxReplayTimeoutErr",
	[0x1a] = "XGMI PCS LinkSubTxTimeoutErr",
	[0x1b] = "XGMI PCS LinkSubRxTimeoutErr",
	[0x1c] = "XGMI PCS RxCMDPktErr",
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
{"XGMI PCS ElasticFifoOverflowErr", 178 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, 179 {"XGMI PCS DeskewErr", 180 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)}, 181 {"XGMI PCS DataStartupLimitErr", 182 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)}, 183 {"XGMI PCS FCInitTimeoutErr", 184 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)}, 185 {"XGMI PCS RecoveryTimeoutErr", 186 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, 187 {"XGMI PCS ReadySerialTimeoutErr", 188 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, 189 {"XGMI PCS ReadySerialAttemptErr", 190 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, 191 {"XGMI PCS RecoveryAttemptErr", 192 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)}, 193 {"XGMI PCS RecoveryRelockAttemptErr", 194 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, 195 }; 196 197 static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = { 198 {"WAFL PCS DataLossErr", 199 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)}, 200 {"WAFL PCS TrainingErr", 201 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)}, 202 {"WAFL PCS CRCErr", 203 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)}, 204 {"WAFL PCS BERExceededErr", 205 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)}, 206 {"WAFL PCS TxMetaDataErr", 207 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)}, 208 {"WAFL PCS ReplayBufParityErr", 209 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)}, 210 {"WAFL PCS DataParityErr", 211 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)}, 212 {"WAFL PCS ReplayFifoOverflowErr", 213 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, 214 {"WAFL PCS ReplayFifoUnderflowErr", 215 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, 216 {"WAFL PCS ElasticFifoOverflowErr", 217 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, 218 {"WAFL PCS DeskewErr", 219 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)}, 220 {"WAFL PCS DataStartupLimitErr", 221 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)}, 222 {"WAFL PCS FCInitTimeoutErr", 223 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)}, 224 {"WAFL PCS RecoveryTimeoutErr", 225 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, 226 {"WAFL PCS ReadySerialTimeoutErr", 227 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, 228 {"WAFL PCS ReadySerialAttemptErr", 229 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, 230 {"WAFL PCS RecoveryAttemptErr", 231 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)}, 232 {"WAFL PCS RecoveryRelockAttemptErr", 233 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, 234 }; 235 236 static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = { 237 {"XGMI3X16 PCS DataLossErr", 238 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)}, 239 {"XGMI3X16 PCS TrainingErr", 240 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)}, 241 {"XGMI3X16 PCS FlowCtrlAckErr", 242 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, 
	{"XGMI3X16 PCS RxFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)},
	{"XGMI3X16 PCS RxFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)},
	{"XGMI3X16 PCS CRCErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI3X16 PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI3X16 PCS TxVcidDataErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)},
	{"XGMI3X16 PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI3X16 PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI3X16 PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI3X16 PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI3X16 PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI3X16 PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI3X16 PCS FlowCtrlCRCErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)},
	{"XGMI3X16 PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI3X16 PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI3X16 PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI3X16 PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI3X16 PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI3X16 PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI3X16 PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
	{"XGMI3X16 PCS ReplayAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)},
	{"XGMI3X16 PCS SyncHdrErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)},
	{"XGMI3X16 PCS TxReplayTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)},
	{"XGMI3X16 PCS RxReplayTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)},
	{"XGMI3X16 PCS LinkSubTxTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)},
	{"XGMI3X16 PCS LinkSubRxTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)},
	{"XGMI3X16 PCS RxCMDPktErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};

static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
{
	const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070;
	const int xgmi_inst = 2;
	u32 link_inst;
	u64 addr;

	link_inst = global_link_num % xgmi_inst;

	addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) +
		adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst);

	return RREG32_PCIE_EXT(addr);
}

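/*
 * amdgpu_get_xgmi_link_status - query the PCS state of one xGMI link.
 *
 * @global_link_num indexes links across all xGMI instances; the helper above
 * maps it to a per-instance link and an extended SMN aperture.  Returns
 * XGMI_LINK_ACTIVE when the low byte of the PCS state history reads LS0,
 * -ENOLINK when it reads the disable state, XGMI_LINK_INACTIVE otherwise,
 * and -EOPNOTSUPP on XGMI IP versions where this query is not supported.
 */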
int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
{
	u32 xgmi_state_reg_val;

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		xgmi_state_reg_val = xgmi_v6_4_get_link_status(adev, global_link_num);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_DISABLE)
		return -ENOLINK;

	if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_LS0)
		return XGMI_LINK_ACTIVE;

	return XGMI_LINK_INACTIVE;
}

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory with a mirrored
 * set of node sub-directories.
 *
 * The XGMI memory space is built by contiguously adding the power-of-two
 * padded VRAM space of each node to the others.
 *
 */
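
/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * reading the attributes documented above.  "card0" and the buffer sizes are
 * assumptions made for this example.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char hive_id[32] = {0}, device_id[32] = {0};
 *		FILE *f;
 *
 *		f = fopen("/sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id", "r");
 *		if (f) {
 *			fgets(hive_id, sizeof(hive_id), f);
 *			fclose(f);
 *		}
 *
 *		f = fopen("/sys/class/drm/card0/device/xgmi_device_id", "r");
 *		if (f) {
 *			fgets(device_id, sizeof(device_id), f);
 *			fclose(f);
 *		}
 *
 *		printf("hive %s node %s\n", hive_id, device_id);
 *		return 0;
 *	}
 */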
static struct attribute amdgpu_xgmi_hive_id = {
	.name = "xgmi_hive_id",
	.mode = S_IRUGO
};

static struct attribute *amdgpu_xgmi_hive_attrs[] = {
	&amdgpu_xgmi_hive_id,
	NULL
};
ATTRIBUTE_GROUPS(amdgpu_xgmi_hive);

static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	if (attr == &amdgpu_xgmi_hive_id)
		return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);

	return 0;
}

static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	amdgpu_reset_put_reset_domain(hive->reset_domain);
	hive->reset_domain = NULL;

	mutex_destroy(&hive->hive_lock);
	kfree(hive);
}

static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
	.show = amdgpu_xgmi_show_attrs,
};

static const struct kobj_type amdgpu_xgmi_hive_type = {
	.release = amdgpu_xgmi_hive_release,
	.sysfs_ops = &amdgpu_xgmi_hive_ops,
	.default_groups = amdgpu_xgmi_hive_groups,
};

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);

}

static ssize_t amdgpu_xgmi_show_physical_id(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%u\n", adev->gmc.xgmi.physical_node_id);

}

static ssize_t amdgpu_xgmi_show_num_hops(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0; i < top->num_nodes; i++)
		sprintf(buf + 3 * i, "%02x ", top->nodes[i].num_hops);

	return sysfs_emit(buf, "%s\n", buf);
}

static ssize_t amdgpu_xgmi_show_num_links(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0; i < top->num_nodes; i++)
		sprintf(buf + 3 * i, "%02x ", top->nodes[i].num_links);

	return sysfs_emit(buf, "%s\n", buf);
}

static ssize_t amdgpu_xgmi_show_connected_port_num(struct device *dev,
						   struct device_attribute *attr,
						   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i, j, size = 0;
	int current_node;
	/*
	 * Get the node id shown in sysfs for the current socket and use it in
	 * the port num info output below for easy reading.  It is NOT the one
	 * retrieved from the xgmi ta.
	 */
	for (i = 0; i < top->num_nodes; i++) {
		if (top->nodes[i].node_id == adev->gmc.xgmi.node_id) {
			current_node = i;
			break;
		}
	}

	if (i == top->num_nodes)
		return -EINVAL;

	for (i = 0; i < top->num_nodes; i++) {
		for (j = 0; j < top->nodes[i].num_links; j++)
			/* node id in sysfs starts from 1 rather than 0, so +1 here */
			size += sysfs_emit_at(buf, size, "%02x:%02x -> %02x:%02x\n", current_node + 1,
					      top->nodes[i].port_num[j].src_xgmi_port_num, i + 1,
					      top->nodes[i].port_num[j].dst_xgmi_port_num);
	}

	return size;
}

#define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	if ((!adev->df.funcs) ||
	    (!adev->df.funcs->get_fica) ||
	    (!adev->df.funcs->set_fica))
		return -EINVAL;

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return sysfs_emit(buf, "%u\n", error_count);
}


static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_physical_id, 0444, amdgpu_xgmi_show_physical_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);
static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL);
static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL);
static DEVICE_ATTR(xgmi_port_num, S_IRUGO, amdgpu_xgmi_show_connected_port_num, NULL);

static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_xgmi_physical_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_physical_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");

	/* Create xgmi num hops file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_num_hops);
	if (ret)
		pr_err("failed to create xgmi_num_hops\n");

	/* Create xgmi num links file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_num_links);
	if (ret)
		pr_err("failed to create xgmi_num_links\n");

	/* Create xgmi port num file if supported */
	if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
		ret = device_create_file(adev->dev, &dev_attr_xgmi_port_num);
		if (ret)
			dev_err(adev->dev, "failed to create xgmi_port_num\n");
	}

	/* Create sysfs link to hive info folder on the first device */
	if (hive->kobj.parent != (&adev->dev->kobj)) {
		ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	/* Create sysfs link from the hive folder back to this device */
	ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;


remove_link:
	sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_physical_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
	if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
		device_remove_file(adev->dev, &dev_attr_xgmi_port_num);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					   struct amdgpu_hive_info *hive)
{
	char node[10];
	memset(node, 0, sizeof(node));

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_physical_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
	if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
		device_remove_file(adev->dev, &dev_attr_xgmi_port_num);

	if (hive->kobj.parent != (&adev->dev->kobj))
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	sysfs_remove_link(&hive->kobj, node);

}



struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = NULL;
	int ret;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	if (adev->hive) {
		kobject_get(&adev->hive->kobj);
		return adev->hive;
	}

	mutex_lock(&xgmi_mutex);

	list_for_each_entry(hive, &xgmi_hive_list, node) {
		if (hive->hive_id == adev->gmc.xgmi.hive_id)
			goto pro_end;
	}

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive) {
		dev_err(adev->dev, "XGMI: allocation failed\n");
		ret = -ENOMEM;
		hive = NULL;
		goto pro_end;
	}

	/* initialize the new hive if it does not exist yet */
	ret = kobject_init_and_add(&hive->kobj,
				   &amdgpu_xgmi_hive_type,
				   &adev->dev->kobj,
				   "%s", "xgmi_hive_info");
	if (ret) {
		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
		kobject_put(&hive->kobj);
		hive = NULL;
		goto pro_end;
	}

	/**
	 * Only init hive->reset_domain for a non-SRIOV configuration.  For SRIOV,
	 * the host driver decides how to reset the GPU, either through FLR or
	 * chain reset.  The guest side will get individual notifications from
	 * the host for the FLR if necessary.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		/**
		 * Avoid recreating the reset domain when the hive is reconstructed for
		 * the case of resetting the devices in the XGMI hive during probe for
		 * passthrough GPUs.
		 * See https://www.spinics.net/lists/amd-gfx/msg58836.html
		 */
		if (adev->reset_domain->type != XGMI_HIVE) {
			hive->reset_domain =
				amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
			if (!hive->reset_domain) {
				dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
				ret = -ENOMEM;
				kobject_put(&hive->kobj);
				hive = NULL;
				goto pro_end;
			}
		} else {
			amdgpu_reset_get_reset_domain(adev->reset_domain);
			hive->reset_domain = adev->reset_domain;
		}
	}

	hive->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&hive->device_list);
	INIT_LIST_HEAD(&hive->node);
	mutex_init(&hive->hive_lock);
	atomic_set(&hive->number_devices, 0);
	task_barrier_init(&hive->tb);
	hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
	hive->hi_req_gpu = NULL;
	atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);

	/*
	 * The hive pstate on boot is high in vega20, so we have to go to low
	 * pstate after boot.
	 */
	hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
	list_add_tail(&hive->node, &xgmi_hive_list);

pro_end:
	if (hive)
		kobject_get(&hive->kobj);
	mutex_unlock(&xgmi_mutex);
	return hive;
}

void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
{
	if (hive)
		kobject_put(&hive->kobj);
}

int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive;
	struct amdgpu_device *request_adev;
	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
	bool init_low;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return 0;

	request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
	init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
	amdgpu_put_xgmi_hive(hive);
	/* fw bug so temporarily disable pstate switching */
	return 0;

	if (!hive || adev->asic_type != CHIP_VEGA20)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (is_hi_req)
		hive->hi_req_count++;
	else
		hive->hi_req_count--;

	/*
	 * Vega20 only needs a single peer to request pstate high for the hive
	 * to go high, but all peers must request pstate low for the hive to go
	 * low.
	 */
	if (hive->pstate == pstate ||
	    (!is_hi_req && hive->hi_req_count && !init_low))
		goto out;

	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
	if (ret) {
		dev_err(request_adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			request_adev->gmc.xgmi.node_id,
			request_adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	if (init_low)
		hive->pstate = hive->hi_req_count ?
					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
	else {
		hive->pstate = pstate;
		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
							adev : NULL;
	}
out:
	mutex_unlock(&hive->hive_lock);
	return ret;
}

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Each psp needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 atomic_read(&hive->number_devices),
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}


/*
 * NOTE psp_xgmi_node_info.num_hops layout is as follows:
 * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
 * num_hops[5:3] = reserved
 * num_hops[2:0] = number of hops
 * (see the illustrative decode sketch further below)
 */
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
			       struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	uint8_t num_hops_mask = 0x7;
	int i;

	for (i = 0 ; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops & num_hops_mask;
	return -EINVAL;
}

int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
			      struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0 ; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_links;
	return -EINVAL;
}

bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
					struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	/* Sharing should always be enabled for non-SRIOV. */
	if (!amdgpu_sriov_vf(adev))
		return true;

	for (i = 0 ; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return !!top->nodes[i].is_sharing_enabled;

	return false;
}

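/*
 * Illustrative only: given the num_hops layout documented above
 * amdgpu_xgmi_get_hops_count(), a raw value could be decoded as in this
 * sketch (the helper name is hypothetical, not part of the driver):
 *
 *	static void example_decode_num_hops(uint8_t num_hops)
 *	{
 *		uint8_t link_type = (num_hops >> 6) & 0x3; // 0 = xGMI2, 1 = xGMI3
 *		uint8_t hops = num_hops & 0x7;             // bits [2:0]
 *
 *		// e.g. num_hops == 0x41 -> xGMI3 link, 1 hop
 *	}
 */
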
/*
 * Devices that support extended data require the entire hive to initialize
 * with the shared memory buffer flag set.
 *
 * Hive locks and conditions apply - see amdgpu_xgmi_add_device
 */
static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
							   bool set_extended_data)
{
	struct amdgpu_device *tmp_adev;
	int ret;

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
		if (ret) {
			dev_err(tmp_adev->dev,
				"XGMI: Failed to initialize xgmi session for data partition %i\n",
				set_extended_data);
			return ret;
		}

	}

	return 0;
}

static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev,
					   struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info;
	struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info;

	for (int i = 0; i < peer_info->num_nodes; i++) {
		if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) {
			for (int j = 0; j < top_info->num_nodes; j++) {
				if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) {
					peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops;
					peer_info->nodes[i].is_sharing_enabled =
						top_info->nodes[j].is_sharing_enabled;
					peer_info->nodes[i].num_links =
						top_info->nodes[j].num_links;
					return;
				}
			}
		}
	}
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi *entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp, false, true);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}
	mutex_lock(&hive->hive_lock);

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	atomic_set(&hive->number_devices, count);

	task_barrier_add_task(&hive->tb);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update the node list for other devices in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit_unlock;
		}

		if (amdgpu_sriov_vf(adev) &&
		    adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
			/* only get the topology for the VF being initialized if it supports full duplex */
			ret = psp_xgmi_get_topology_info(&adev->psp, count,
							 &adev->psp.xgmi_context.top_info, false);
			if (ret) {
				dev_err(adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					adev->gmc.xgmi.node_id,
					adev->gmc.xgmi.hive_id, ret);
				/* To do: continue when some nodes fail or disable the whole hive */
				goto exit_unlock;
			}

			/* fill the topology info for peers instead of getting it from PSP */
			list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
				amdgpu_xgmi_fill_topology_info(adev, tmp_adev);
			}
		} else {
			/* get the latest topology info for each device from psp */
			list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
				ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
								 &tmp_adev->psp.xgmi_context.top_info, false);
				if (ret) {
					dev_err(tmp_adev->dev,
						"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
						tmp_adev->gmc.xgmi.node_id,
						tmp_adev->gmc.xgmi.hive_id, ret);
					/* To do: continue when some nodes fail or disable the whole hive */
					goto exit_unlock;
				}
			}
		}

		/* get the topology again for hives that support extended data */
		if (adev->psp.xgmi_context.supports_extended_data) {

			/* initialize the hive to get extended data. */
			ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
			if (ret)
				goto exit_unlock;

			/* get the extended data. */
			list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
				ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
								 &tmp_adev->psp.xgmi_context.top_info, true);
				if (ret) {
					dev_err(tmp_adev->dev,
						"XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
						tmp_adev->gmc.xgmi.node_id,
						tmp_adev->gmc.xgmi.hive_id, ret);
					goto exit_unlock;
				}
			}

			/* initialize the hive to get non-extended data for the next round. */
			ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
			if (ret)
				goto exit_unlock;

		}
	}

	if (!ret)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

exit_unlock:
	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret) {
		adev->hive = hive;
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	} else {
		amdgpu_put_xgmi_hive(hive);
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);
	}

	return ret;
}

int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = adev->hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	if (!hive)
		return -EINVAL;

	mutex_lock(&hive->hive_lock);
	task_barrier_rem_task(&hive->tb);
	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
	if (hive->hi_req_gpu == adev)
		hive->hi_req_gpu = NULL;
	list_del(&adev->gmc.xgmi.head);
	mutex_unlock(&hive->hive_lock);

	amdgpu_put_xgmi_hive(hive);
	adev->hive = NULL;

	if (atomic_dec_return(&hive->number_devices) == 0) {
		/* Remove the hive from the global hive list */
		mutex_lock(&xgmi_mutex);
		list_del(&hive->node);
		mutex_unlock(&xgmi_mutex);

		amdgpu_put_xgmi_hive(hive);
	}

	return 0;
}

static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				       enum aca_smu_type type, void *data)
{
	struct amdgpu_device *adev = handle->adev;
	struct aca_bank_info info;
	const char *error_str;
	u64 status, count;
	int ret, ext_error_code;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);

	error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
		xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
	if (error_str)
		dev_info(adev->dev, "%s detected\n", error_str);

	count = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);

	switch (type) {
	case ACA_SMU_TYPE_UE:
		if (ext_error_code != 0 && ext_error_code != 9)
			count = 0ULL;

		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count);
		break;
	case ACA_SMU_TYPE_CE:
		count = ext_error_code == 6 ? count : 0ULL;
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, count);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = {
	.aca_bank_parser = xgmi_v6_4_0_aca_bank_parser,
};

static const struct aca_info xgmi_v6_4_0_aca_info = {
	.hwip = ACA_HWIP_TYPE_PCS_XGMI,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &xgmi_v6_4_0_aca_bank_ops,
};

static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL,
					&xgmi_v6_4_0_aca_info, NULL);
		if (r)
			goto late_fini;
		break;
	default:
		break;
	}

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
	return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}

static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
	WREG32_PCIE(pcs_status_reg, 0);
}

static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_arct[i]);
		break;
	case CHIP_VEGA20:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_vg20[i]);
		break;
	case CHIP_ALDEBARAN:
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
			pcs_clear_status(adev,
					 xgmi3x16_pcs_err_status_reg_aldebaran[i]);
		for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
			pcs_clear_status(adev,
					 walf_pcs_err_status_reg_aldebaran[i]);
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++)
			pcs_clear_status(adev,
					 xgmi3x16_pcs_err_status_reg_v6_4[i]);
		break;
	default:
		break;
	}
}

static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base)
{
	WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
}

static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
		__xgmi_v6_4_0_reset_error_count(adev, xgmi_inst, xgmi_v6_4_0_mca_base_array[i]);
}

static void xgmi_v6_4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
	int i;

	for_each_inst(i, adev->aid_mask)
		xgmi_v6_4_0_reset_error_count(adev, i);
}

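/*
 * On XGMI v6.4.0 the PCS error state lives in per-instance MCA banks and is
 * cleared through WREG64_MCA above; older ASICs instead clear the SMN PCS
 * error status registers via amdgpu_xgmi_legacy_reset_ras_error_count().
 */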
static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		xgmi_v6_4_0_reset_ras_error_count(adev);
		break;
	default:
		amdgpu_xgmi_legacy_reset_ras_error_count(adev);
		break;
	}
}

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t mask_value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs,
					      bool check_mask)
{
	int i;
	int ue_cnt = 0;
	const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL;
	uint32_t field_array_size = 0;

	if (is_xgmi_pcs) {
		if (amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
		    IP_VERSION(6, 1, 0) ||
		    amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
		    IP_VERSION(6, 4, 0)) {
			pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
			field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
		} else {
			pcs_ras_fields = &xgmi_pcs_ras_fields[0];
			field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields);
		}
	} else {
		pcs_ras_fields = &wafl_pcs_ras_fields[0];
		field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields);
	}

	if (check_mask)
		value = value & ~mask_value;

	/* query xgmi/wafl pcs error status,
	 * only ue is supported */
	for (i = 0; value && i < field_array_size; i++) {
		ue_cnt = (value &
			  pcs_ras_fields[i].pcs_err_mask) >>
			 pcs_ras_fields[i].pcs_err_shift;
		if (ue_cnt) {
			dev_info(adev->dev, "%s detected\n",
				 pcs_ras_fields[i].err_name);
			*ue_count += ue_cnt;
		}

		/* reset the bit value once the bit has been checked */
		value &= ~(pcs_ras_fields[i].pcs_err_mask);
	}

	return 0;
}

static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
						     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i, supported = 1;
	uint32_t data, mask_data = 0;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, false);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, false, false);
		}
		break;
	case CHIP_VEGA20:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, false);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, false, false);
		}
		break;
	case CHIP_ALDEBARAN:
		/* check xgmi3x16 pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
			data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
			mask_data =
				RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
			data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
			mask_data =
				RREG32_PCIE(walf_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, false, true);
		}
		break;
	default:
		supported = 0;
		break;
	}

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		/* check xgmi3x16 pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) {
			data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]);
			mask_data =
				RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, true);
		}
		break;
	default:
		if (!supported)
			dev_warn(adev->dev, "XGMI RAS error query not supported");
		break;
	}

	amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;
}

static enum aca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
{
	const char *error_str;
	int ext_error_code;

	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);

	error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
		xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
	if (error_str)
		dev_info(adev->dev, "%s detected\n", error_str);

	switch (ext_error_code) {
	case 0:
		return ACA_ERROR_TYPE_UE;
	case 6:
		return ACA_ERROR_TYPE_CE;
	default:
		return -EINVAL;
	}

	return -EINVAL;
}

static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct amdgpu_smuio_mcm_config_info *mcm_info,
					    u64 mca_base, struct ras_err_data *err_data)
{
	int xgmi_inst = mcm_info->die_id;
	u64 status = 0;

	status = RREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS);
	if (!ACA_REG__STATUS__VAL(status))
		return;

	switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
	case ACA_ERROR_TYPE_UE:
		amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
		break;
	case ACA_ERROR_TYPE_CE:
		amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
		break;
	default:
		break;
	}

	WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
}

static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data)
{
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = xgmi_inst,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
		__xgmi_v6_4_0_query_error_count(adev, &mcm_info, xgmi_v6_4_0_mca_base_array[i], err_data);
}

static void xgmi_v6_4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;

	for_each_inst(i, adev->aid_mask)
		xgmi_v6_4_0_query_error_count(adev, i, err_data);
}

static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
					      void *ras_error_status)
{
	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status);
		break;
	default:
		amdgpu_xgmi_legacy_query_ras_error_count(adev, ras_error_status);
		break;
	}
}

/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
					void *inject_if, uint32_t instance_mask)
{
	int ret1, ret2;
	struct ta_ras_trigger_error_input *block_info =
		(struct ta_ras_trigger_error_input *)inject_if;

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		dev_warn(adev->dev, "Failed to disallow df cstate");

	ret1 = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW);
	if (ret1 && ret1 != -EOPNOTSUPP)
		dev_warn(adev->dev, "Failed to disallow XGMI power down");

	ret2 = psp_ras_trigger_error(&adev->psp, block_info, instance_mask);

	if (amdgpu_ras_intr_triggered())
		return ret2;

	ret1 = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT);
	if (ret1 && ret1 != -EOPNOTSUPP)
		dev_warn(adev->dev, "Failed to allow XGMI power down");

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		dev_warn(adev->dev, "Failed to allow df cstate");

	return ret2;
}

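/* RAS glue: these ops back the xgmi_wafl RAS block registered in amdgpu_xgmi_ras_sw_init() below. */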
struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
	.query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
	.reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
	.ras_error_inject = amdgpu_ras_error_inject_xgmi,
};

struct amdgpu_xgmi_ras xgmi_ras = {
	.ras_block = {
		.hw_ops = &xgmi_ras_hw_ops,
		.ras_late_init = amdgpu_xgmi_ras_late_init,
	},
};

int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_xgmi_ras *ras;

	if (!adev->gmc.xgmi.ras)
		return 0;

	ras = adev->gmc.xgmi.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

static void amdgpu_xgmi_reset_on_init_work(struct work_struct *work)
{
	struct amdgpu_hive_info *hive =
		container_of(work, struct amdgpu_hive_info, reset_on_init_work);
	struct amdgpu_reset_context reset_context;
	struct amdgpu_device *tmp_adev;
	struct list_head device_list;
	int r;

	mutex_lock(&hive->hive_lock);

	INIT_LIST_HEAD(&device_list);
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
		list_add_tail(&tmp_adev->reset_list, &device_list);

	tmp_adev = list_first_entry(&device_list, struct amdgpu_device,
				    reset_list);
	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);

	reset_context.method = AMD_RESET_METHOD_ON_INIT;
	reset_context.reset_req_dev = tmp_adev;
	reset_context.hive = hive;
	reset_context.reset_device_list = &device_list;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

	amdgpu_reset_do_xgmi_reset_on_init(&reset_context);
	mutex_unlock(&hive->hive_lock);
	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		r = amdgpu_ras_init_badpage_info(tmp_adev);
		if (r && r != -EHWPOISON)
			dev_err(tmp_adev->dev,
				"error during bad page data initialization");
	}
}

static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive)
{
	INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work);
	amdgpu_reset_domain_schedule(hive->reset_domain,
				     &hive->reset_on_init_work);
}

int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;
	bool reset_scheduled;
	int num_devs;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return -EINVAL;

	mutex_lock(&hive->hive_lock);
	num_devs = atomic_read(&hive->number_devices);
	reset_scheduled = false;
	if (num_devs == adev->gmc.xgmi.num_physical_nodes) {
		amdgpu_xgmi_schedule_reset_on_init(hive);
		reset_scheduled = true;
	}

	mutex_unlock(&hive->hive_lock);
	amdgpu_put_xgmi_hive(hive);

	if (reset_scheduled)
		flush_work(&hive->reset_on_init_work);

	return 0;
}

int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
				   struct amdgpu_hive_info *hive,
				   int req_nps_mode)
{
	struct amdgpu_device *tmp_adev;
	int cur_nps_mode, r;

	/* This is expected to be called only during unload of the driver.  The
	 * request needs to be placed only once for all devices in the hive.  If
	 * one of them fails, revert the request for the previously successful
	 * devices.  After placing the request, set the hive mode to UNKNOWN so
	 * that other devices don't request anymore.
	 */
	mutex_lock(&hive->hive_lock);
	if (atomic_read(&hive->requested_nps_mode) ==
	    UNKNOWN_MEMORY_PARTITION_MODE) {
		dev_dbg(adev->dev, "Unexpected entry for hive NPS change");
		mutex_unlock(&hive->hive_lock);
		return 0;
	}
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		r = adev->gmc.gmc_funcs->request_mem_partition_mode(
			tmp_adev, req_nps_mode);
		if (r)
			break;
	}
	if (r) {
		/* Request back the current mode if one of the requests failed */
		cur_nps_mode =
			adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev);
		list_for_each_entry_continue_reverse(
			tmp_adev, &hive->device_list, gmc.xgmi.head)
			adev->gmc.gmc_funcs->request_mem_partition_mode(
				tmp_adev, cur_nps_mode);
	}
	/* Set to UNKNOWN so that other devices don't request anymore */
	atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);
	mutex_unlock(&hive->hive_lock);

	return r;
}