/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"
#define MAX_UMC_POISON_POLLING_TIME_SYNC 20  //ms

#define MAX_UMC_HASH_STRING_SIZE 256

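/*
 * amdgpu_umc_convert_error_address - translate a raw UMC error address for the
 * given channel/UMC instance into retired-page records in @err_data, by
 * dispatching to the UMC-IP-version specific conversion routine (only
 * UMC v6.7 is handled here).
 */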
static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
				struct ras_err_data *err_data, uint64_t err_addr,
				uint32_t ch_inst, uint32_t umc_inst)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 7, 0):
		umc_v6_7_convert_error_address(adev,
				err_data, err_addr, ch_inst, umc_inst);
		break;
	default:
		dev_warn(adev->dev,
			 "UMC address to Physical address translation is not supported\n");
		return AMDGPU_RAS_FAIL;
	}

	return AMDGPU_RAS_SUCCESS;
}

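/*
 * amdgpu_umc_page_retirement_mca - retire pages for an error address reported
 * through the MCA notifier path: translate the UMC channel address to
 * physical pages and, if a bad page threshold is configured, record and save
 * the resulting bad pages.
 */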
int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
			uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst)
{
	struct ras_err_data err_data;
	int ret;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	err_data.err_addr =
		kcalloc(adev->umc.max_ras_err_cnt_per_query,
			sizeof(struct eeprom_table_record), GFP_KERNEL);
	if (!err_data.err_addr) {
		dev_warn(adev->dev,
			"Failed to alloc memory for umc error record in MCA notifier!\n");
		ret = AMDGPU_RAS_FAIL;
		goto out_fini_err_data;
	}

	err_data.err_addr_len = adev->umc.max_ras_err_cnt_per_query;

	/*
	 * Translate UMC channel address to Physical address
	 */
	ret = amdgpu_umc_convert_error_address(adev, &err_data, err_addr,
					ch_inst, umc_inst);
	if (ret)
		goto out_free_err_addr;

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

out_free_err_addr:
	kfree(err_data.err_addr);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

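/*
 * amdgpu_umc_handle_bad_pages - query UMC error counts and error addresses,
 * either through the direct hardware query hw_ops path or through the
 * firmware-provided ECC info path, then retire the reported pages when a bad
 * page threshold is configured, save them via amdgpu_ras_save_bad_pages(),
 * and report the bad page count and bad channel bitmap through the DPM
 * interface.
 */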
void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
			void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	unsigned int error_query_mode;
	int ret = 0;
	unsigned long err_count;

	amdgpu_ras_get_error_query_mode(adev, &error_query_mode);

	mutex_lock(&con->page_retirement_lock);
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
	if (ret == -EOPNOTSUPP &&
	    error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even if a NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for "
						"umc error address record!\n");
			else
				err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
		}
	} else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
		   (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even if a NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for "
						"umc error address record!\n");
			else
				err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
		}
	}

	/* only uncorrectable error needs gpu reset */
	if (err_data->ue_count || err_data->de_count) {
		err_count = err_data->ue_count + err_data->de_count;
		if ((amdgpu_bad_page_threshold != 0) &&
		    err_data->err_addr_cnt) {
			amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
						err_data->err_addr_cnt);
			amdgpu_ras_save_bad_pages(adev, &err_count);

			amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);

			if (con->update_channel_flag == true) {
				amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
				con->update_channel_flag = false;
			}
		}
	}

	kfree(err_data->err_addr);
	err_data->err_addr = NULL;

	mutex_unlock(&con->page_retirement_lock);
}

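/*
 * amdgpu_umc_do_page_retirement - flag the SRAM ECC event to KFD, run bad
 * page handling, and trigger a GPU reset when uncorrectable or deferred
 * errors were found and either a reset mode was requested or the device is
 * already in RMA state.
 */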
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry,
		uint32_t reset)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_umc_handle_bad_pages(adev, ras_error_status);

	if ((err_data->ue_count || err_data->de_count) &&
	    (reset || amdgpu_ras_is_rma(adev))) {
		con->gpu_reset_flags |= reset;
		amdgpu_ras_reset_gpu(adev);
	}

	return AMDGPU_RAS_SUCCESS;
}

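/*
 * amdgpu_umc_pasid_poison_handler - entry point for poison consumption.
 * On APUs and CPU-connected XGMI parts only a GPU reset is issued, since page
 * retirement is handled by the MCA notifier.  On bare-metal dGPUs, pre-UMC
 * v12 IPs retire pages synchronously here, while newer IPs queue a poison
 * request and wake the page retirement worker.  Under SR-IOV the request is
 * forwarded through the virt ras_poison_handler callback.
 */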
int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint16_t pasid,
			pasid_notify pasid_fn, void *data, uint32_t reset)
{
	int ret = AMDGPU_RAS_SUCCESS;

	if (adev->gmc.xgmi.connected_to_cpu ||
	    adev->gmc.is_app_apu) {
		if (reset) {
			/* MCA poison handler is only responsible for GPU reset,
			 * let MCA notifier do page retirement.
			 */
			kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
			amdgpu_ras_reset_gpu(adev);
		}
		return ret;
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
			struct ras_err_data err_data;
			struct ras_common_if head = {
				.block = AMDGPU_RAS_BLOCK__UMC,
			};
			struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);

			ret = amdgpu_ras_error_data_init(&err_data);
			if (ret)
				return ret;

			ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);

			if (ret == AMDGPU_RAS_SUCCESS && obj) {
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;
				obj->err_data.de_count += err_data.de_count;
			}

			amdgpu_ras_error_data_fini(&err_data);
		} else {
			struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
			int ret;

			ret = amdgpu_ras_put_poison_req(adev,
					block, pasid, pasid_fn, data, reset);
			if (!ret) {
				atomic_inc(&con->page_retirement_req_cnt);
				wake_up(&con->page_retirement_wq);
			}
		}
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, block);
		else
			dev_warn(adev->dev,
				"No ras_poison_handler interface in SRIOV!\n");
	}

	return ret;
}

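/*
 * amdgpu_umc_poison_handler - convenience wrapper around
 * amdgpu_umc_pasid_poison_handler() for callers without PASID context.
 */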
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint32_t reset)
{
	return amdgpu_umc_pasid_poison_handler(adev,
				block, 0, NULL, NULL, reset);
}

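/*
 * amdgpu_umc_process_ras_data_cb - default RAS callback for the UMC block;
 * performs page retirement and requests a mode-1 reset for uncorrectable
 * errors.
 */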
int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry)
{
	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry,
				AMDGPU_RAS_GPU_RESET_MODE1_RESET);
}

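/*
 * amdgpu_umc_ras_sw_init - register the UMC RAS block and fill in the common
 * fields (name, block id, error type), installing the default late_init and
 * ras_cb hooks when the IP-specific code has not provided its own.
 */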
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_umc_ras *ras;

	if (!adev->umc.ras)
		return 0;

	ras = adev->umc.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register umc ras block!\n");
		return err;
	}

	strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->umc.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;

	return 0;
}

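/*
 * amdgpu_umc_ras_late_init - common late init for the UMC RAS block: run the
 * generic RAS block late init, and on bare metal enable the ECC interrupt
 * (when RAS is supported) and call the IP-specific error counter init.
 */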
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev))
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto late_fini;
	}

	/* ras init of specific umc version */
	if (adev->umc.ras &&
	    adev->umc.ras->err_cnt_init)
		adev->umc.ras->err_cnt_init(adev);

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

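/*
 * amdgpu_umc_process_ecc_irq - ECC interrupt handler; dispatches the IV entry
 * to the RAS interrupt framework for the UMC block.
 */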
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->umc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

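/*
 * amdgpu_umc_fill_error_record - append one eeprom_table_record to
 * err_data->err_addr, storing the raw error address, the retired page frame
 * number, a timestamp and the channel/UMC instance that reported the error.
 * Returns -EINVAL if the record array is missing or already full.
 */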
int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
		uint64_t err_addr,
		uint64_t retired_page,
		uint32_t channel_index,
		uint32_t umc_inst)
{
	struct eeprom_table_record *err_rec;

	if (!err_data ||
	    !err_data->err_addr ||
	    (err_data->err_addr_cnt >= err_data->err_addr_len))
		return -EINVAL;

	err_rec = &err_data->err_addr[err_data->err_addr_cnt];

	err_rec->address = err_addr;
	/* page frame address is saved */
	err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
	err_rec->ts = (uint64_t)ktime_get_real_seconds();
	err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
	err_rec->cu = 0;
	err_rec->mem_channel = channel_index;
	err_rec->mcumc_id = umc_inst;

	err_data->err_addr_cnt++;

	return 0;
}

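/*
 * amdgpu_umc_loop_channels - invoke @func for every UMC channel instance,
 * iterating over node/UMC/channel on multi-node parts and over UMC/channel
 * otherwise; stops and returns the first non-zero value @func returns.
 */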
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
			umc_func func, void *data)
{
	uint32_t node_inst = 0;
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	int ret = 0;

	if (adev->umc.node_inst_num) {
		LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
			ret = func(adev, node_inst, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Node %d umc %d ch %d func returns %d\n",
					node_inst, umc_inst, ch_inst, ret);
				return ret;
			}
		}
	} else {
		LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
			ret = func(adev, 0, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Umc %d ch %d func returns %d\n",
					umc_inst, ch_inst, ret);
				return ret;
			}
		}
	}

	return 0;
}

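/*
 * amdgpu_umc_update_ecc_status - forward an MCA status/IPID/address triple to
 * the IP-specific update_ecc_status handler, if one is implemented.
 */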
int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
		uint64_t status, uint64_t ipid, uint64_t addr)
{
	if (adev->umc.ras->update_ecc_status)
		return adev->umc.ras->update_ecc_status(adev,
					status, ipid, addr);
	return 0;
}

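/*
 * amdgpu_umc_logs_ecc_err - insert an ECC error record into the given radix
 * tree under the ECC log lock, keyed by the physical page frame number, and
 * tag it as newly detected (UMC_ECC_NEW_DETECTED_TAG) on success.
 */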
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
		struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_log_info *ecc_log;
	int ret;

	ecc_log = &con->umc_ecc_log;

	mutex_lock(&ecc_log->lock);
	ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err);
	if (!ret)
		radix_tree_tag_set(ecc_tree,
			ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
	mutex_unlock(&ecc_log->lock);

	return ret;
}