/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"
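
/*
 * Note on the mailbox control bytes as used by the helpers below: both the
 * TRN and RCV control bytes carry a "message valid" flag in bit 0 and an
 * "ack" flag in bit 1, which is why these helpers write/test the literal
 * values 1 and 2.
 */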
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * this peek_msg may *only* be called from an IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If not called from an IRQ routine, this peek_msg cannot be guaranteed to
 * return the correct value, since it reads RCV_DW0 without checking whether
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	int r = 0;
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg == IDH_FAIL)
		r = -EINVAL;
	if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
		r = -ENODEV;
	else if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return r;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	dev_err(adev->dev, "Didn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r) {
			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
				event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
			return 0;
		} else if (r == -ENODEV) {
			if (!amdgpu_ras_is_rma(adev)) {
				ras->is_rma = true;
				dev_err(adev->dev, "VF is in an unrecoverable state. "
					"Runtime Services are halted.\n");
			}
			return r;
		}

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	dev_dbg(adev->dev, "nv_poll_msg timed out\n");

	return -ETIME;
}
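
/*
 * Send one request to the host PF over the mailbox: the four message words
 * are written to MSGBUF_TRN_DW0..DW3, TRN_MSG_VALID is raised, and the VF
 * then polls for the host's ack before dropping TRN_MSG_VALID again.
 */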
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; once the
	 * host's RCV_MSG_ACK is cleared, hardware automatically clears the
	 * VF's TRN_MSG_ACK. Otherwise the xgpu_nv_poll_ack() below would
	 * return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			dev_err_ratelimited(adev->dev, "trn=%x ACK should not assert! wait again!\n", trn);
			msleep(1);
		}
	} while (trn);

	dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		dev_err(adev->dev, "Didn't get ack from pf, continuing\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
						   enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r = 0, retry = 1;
	enum idh_event event = -1;

	mutex_lock(&virt->access_req_mutex);

send_request:
	if (amdgpu_ras_is_rma(adev)) {
		r = -ENODEV;
		goto out;
	}

	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	case IDH_RAS_POISON:
		if (data1 != 0)
			event = IDH_RAS_POISON_READY;
		break;
	case IDH_REQ_RAS_ERROR_COUNT:
		event = IDH_RAS_ERROR_COUNT_READY;
		break;
	case IDH_REQ_RAS_CPER_DUMP:
		event = IDH_RAS_CPER_DUMP_READY;
		break;
	case IDH_REQ_RAS_CHK_CRITI:
		event = IDH_REQ_RAS_CHK_CRITI_READY;
		break;
	case IDH_REQ_RAS_REMOTE_CMD:
		event = IDH_REQ_RAS_REMOTE_CMD_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 5)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				dev_err(adev->dev, "Didn't get msg:%d from pf, error=%d\n", event, r);
				goto out;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) {
				case GPU_CRIT_REGION_V2:
					adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2;
					adev->virt.init_data_header.offset =
						RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
					adev->virt.init_data_header.size_kb =
						RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3);
					break;
				default:
					adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1;
					adev->virt.init_data_header.offset = -1;
					adev->virt.init_data_header.size_kb = 0;
					break;
				}
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

out:
	mutex_unlock(&virt->access_req_mutex);

	return r;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	return xgpu_nv_send_access_requests_with_param(adev,
						       req, 0, 0, 0);
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}
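
/*
 * Exclusive ("full") GPU access handshake: the VF requests exclusive access
 * from the host PF around init, fini and reset via the IDH_REQ_GPU_*_ACCESS
 * messages and hands it back with the matching IDH_REL_GPU_*_ACCESS message
 * once done.
 */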
static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA,
						       0, GPU_CRIT_REGION_V2, 0);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}

static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
			dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
			return 0;
		}
		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	dev_dbg(adev->dev, "waiting for NV IDH_FLR_NOTIFICATION_CMPL timed out\n");
	return -ETIME;
}

static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	struct amdgpu_reset_context reset_context = { 0 };

	amdgpu_virt_fini_data_exchange(adev);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}
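
/**
 * xgpu_nv_mailbox_req_bad_pages_work - Request the latest bad page list from the host
 * @work: pointer to the work_struct
 *
 * This work handler is scheduled when the host notifies the VF that new bad
 * pages are available; it tears down the data exchange region and requests
 * the updated bad page information from the host, guarded by the reset
 * domain semaphore.
 */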
static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	if (down_read_trylock(&adev->reset_domain->sem)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_request_bad_pages(adev);
		up_read(&adev->reset_domain->sem);
	}
}

/**
 * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
 * @work: pointer to the work_struct
 *
 * This work handler is triggered when bad pages are ready, and it reinitializes
 * the data exchange region to retrieve updated bad page information from the host.
 */
static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	if (down_read_trylock(&adev->reset_domain->sem)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_init_data_exchange(adev);
		up_read(&adev->reset_domain->sem);
	}
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}
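
/*
 * Receive-interrupt handler: notifications that need deferred handling
 * (bad pages, FLR, unrecoverable errors) are acked and/or pushed to work
 * items here, while events consumed by the polling path (e.g.
 * READY_TO_ACCESS_GPU) are ignored.
 */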
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	switch (event) {
	case IDH_RAS_BAD_PAGES_READY:
		xgpu_nv_mailbox_send_ack(adev);
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.handle_bad_pages_work);
		break;
	case IDH_RAS_BAD_PAGES_NOTIFICATION:
		xgpu_nv_mailbox_send_ack(adev);
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.req_bad_pages_work);
		break;
	case IDH_UNRECOV_ERR_NOTIFICATION:
		xgpu_nv_mailbox_send_ack(adev);
		if (!amdgpu_ras_is_rma(adev)) {
			ras->is_rma = true;
			dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
		}

		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
	 * handler can ignore it here since the polling thread will handle
	 * it; other messages such as FLR complete are not handled here
	 * either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
	INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
	INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
				       enum amdgpu_ras_block block)
{
	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
	} else {
		amdgpu_virt_fini_data_exchange(adev);
		xgpu_nv_send_access_requests_with_param(adev,
							IDH_RAS_POISON, block, 0, 0);
	}
}

static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
{
	enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);

	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}
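
/*
 * The 64-bit values passed to the host below (the VF's CPER ring read
 * pointer and the critical-region address) do not fit in a single mailbox
 * data word, so they are split into high/low 32-bit halves and sent as two
 * parameters.
 */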
static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
{
	uint32_t vf_rptr_hi, vf_rptr_lo;

	vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
	vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
	return xgpu_nv_send_access_requests_with_param(
		adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}

static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}

static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
{
	uint32_t addr_hi, addr_lo;

	addr_hi = (uint32_t)(addr >> 32);
	addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
	return xgpu_nv_send_access_requests_with_param(
		adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
}

static int xgpu_nv_req_remote_ras_cmd(struct amdgpu_device *adev,
				      u32 param1, u32 param2, u32 param3)
{
	return xgpu_nv_send_access_requests_with_param(
		adev, IDH_REQ_RAS_REMOTE_CMD, param1, param2, param3);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.req_init_data = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.ready_to_reset = xgpu_nv_ready_to_reset,
	.wait_reset = xgpu_nv_wait_reset,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
	.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
	.req_ras_err_count = xgpu_nv_req_ras_err_count,
	.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
	.req_bad_pages = xgpu_nv_req_ras_bad_pages,
	.req_ras_chk_criti = xgpu_nv_check_vf_critical_region,
	.req_remote_ras_cmd = xgpu_nv_req_remote_ras_cmd,
};