/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
        WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
        WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful while the host has
 * RCV_MSG_VALID set.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
        return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
                                   enum idh_event event)
{
        u32 reg;

        reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
        if (reg != event)
                return -ENOENT;

        xgpu_nv_mailbox_send_ack(adev);

        return 0;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
        return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
        int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
        u8 reg;

        do {
                reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
                if (reg & 2)
                        return 0;

                mdelay(5);
                timeout -= 5;
        } while (timeout > 1);

        pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

        return -ETIME;
}

static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
        int r;
        uint64_t timeout, now;

        now = (uint64_t)ktime_to_ms(ktime_get());
        timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

        do {
                r = xgpu_nv_mailbox_rcv_msg(adev, event);
                if (!r)
                        return 0;

                msleep(10);
                now = (uint64_t)ktime_to_ms(ktime_get());
        } while (timeout > now);

        return -ETIME;
}
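
/*
 * Write a request and up to three data words into the VF-to-PF mailbox,
 * raise TRN_MSG_VALID, and wait for the host's acknowledgement.  Any stale
 * ack from a previous transaction is drained first so the xgpu_nv_poll_ack()
 * call below does not return prematurely.
 */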
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
              enum idh_request req, u32 data1, u32 data2, u32 data3)
{
        int r;
        uint8_t trn;

        /* IMPORTANT:
         * Clear TRN_MSG_VALID to make the hardware clear the host's
         * RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK; otherwise
         * the xgpu_nv_poll_ack() below would return immediately.
         */
        do {
                xgpu_nv_mailbox_set_valid(adev, false);
                trn = xgpu_nv_peek_ack(adev);
                if (trn) {
                        pr_err("trn=%x ACK should not assert! wait again !\n", trn);
                        msleep(1);
                }
        } while (trn);

        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
        xgpu_nv_mailbox_set_valid(adev, true);

        /* start to poll ack */
        r = xgpu_nv_poll_ack(adev);
        if (r)
                pr_err("Doesn't get ack from pf, continue\n");

        xgpu_nv_mailbox_set_valid(adev, false);
}
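
/*
 * Send a request to the host and, for requests that have a defined reply,
 * poll the receive mailbox for that reply, making up to five attempts in
 * total.  For the init-data and init/reset access requests, the version
 * number and checksum returned by the host are captured as a side effect.
 */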
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
                enum idh_request req, u32 data1, u32 data2, u32 data3)
{
        int r, retry = 1;
        enum idh_event event = -1;

send_request:
        xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

        switch (req) {
        case IDH_REQ_GPU_INIT_ACCESS:
        case IDH_REQ_GPU_FINI_ACCESS:
        case IDH_REQ_GPU_RESET_ACCESS:
                event = IDH_READY_TO_ACCESS_GPU;
                break;
        case IDH_REQ_GPU_INIT_DATA:
                event = IDH_REQ_GPU_INIT_DATA_READY;
                break;
        case IDH_RAS_POISON:
                if (data1 != 0)
                        event = IDH_RAS_POISON_READY;
                break;
        default:
                break;
        }

        if (event != -1) {
                r = xgpu_nv_poll_msg(adev, event);
                if (r) {
                        if (retry++ < 5)
                                goto send_request;

                        if (req != IDH_REQ_GPU_INIT_DATA) {
                                pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
                                return r;
                        } else /* host doesn't support REQ_GPU_INIT_DATA handshake */
                                adev->virt.req_init_data_ver = 0;
                } else {
                        if (req == IDH_REQ_GPU_INIT_DATA) {
                                adev->virt.req_init_data_ver =
                                        RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

                                /* assume V1 in case host doesn't set version number */
                                if (adev->virt.req_init_data_ver < 1)
                                        adev->virt.req_init_data_ver = 1;
                        }
                }

                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
                                RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
                }
        }

        return 0;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
{
        return xgpu_nv_send_access_requests_with_param(adev,
                                                req, 0, 0, 0);
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
        int ret, i = 0;

        while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
                ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
                if (!ret)
                        break;
                i++;
        }

        return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
        return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;
        int r = 0;

        req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
        r = xgpu_nv_send_access_requests(adev, req);

        return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
        return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("get ack intr and do nothing.\n");
        return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                tmp |= 2;
        else
                tmp &= ~2;

        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

        return 0;
}

static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
        xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}

static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
        int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

        do {
                if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        return 0;
                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

        dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n");
        return -ETIME;
}
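
/*
 * Worker scheduled from the mailbox receive interrupt when the host signals
 * a function-level reset (FLR).  It stops VF/PF data exchange and, when no
 * TDR is expected to handle the failure, kicks off full GPU recovery.
 */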
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

        amdgpu_virt_fini_data_exchange(adev);

        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev)
                && (!amdgpu_device_has_job_running(adev) ||
                adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
                struct amdgpu_reset_context reset_context;

                memset(&reset_context, 0, sizeof(reset_context));

                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
                set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

                amdgpu_device_gpu_recover(adev, NULL, &reset_context);
        }
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *src,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                tmp |= 1;
        else
                tmp &= ~1;

        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

        return 0;
}
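
/*
 * Interrupt handler for messages sent by the host through the PF-to-VF
 * mailbox.  Only FLR notifications are acted on here; they are deferred to
 * flr_work in the reset domain since recovery cannot run in IRQ context.
 */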
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

        switch (event) {
        case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev))
                        WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
                                                                &adev->virt.flr_work),
                                  "Failed to queue work! at %s",
                                  __func__);
                break;
                /* READY_TO_ACCESS_GPU is fetched by the kernel's polling thread,
                 * so the IRQ handler can ignore it here; other messages such as
                 * FLR complete are likewise not handled here.
                 */
        case IDH_CLR_MSG_BUF:
        case IDH_FLR_NOTIFICATION_CMPL:
        case IDH_READY_TO_ACCESS_GPU:
        default:
                break;
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
        .set = xgpu_nv_set_mailbox_ack_irq,
        .process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
        .set = xgpu_nv_set_mailbox_rcv_irq,
        .process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->virt.ack_irq.num_types = 1;
        adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
        adev->virt.rcv_irq.num_types = 1;
        adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
        if (r)
                return r;
        r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

        return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
        amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
                                       enum amdgpu_ras_block block)
{
        if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
                xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
        } else {
                amdgpu_virt_fini_data_exchange(adev);
                xgpu_nv_send_access_requests_with_param(adev,
                                IDH_RAS_POISON, block, 0, 0);
        }
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_full_gpu = xgpu_nv_request_full_gpu_access,
        .rel_full_gpu = xgpu_nv_release_full_gpu_access,
        .req_init_data = xgpu_nv_request_init_data,
        .reset_gpu = xgpu_nv_request_reset,
        .ready_to_reset = xgpu_nv_ready_to_reset,
        .wait_reset = xgpu_nv_wait_reset,
        .trans_msg = xgpu_nv_mailbox_trans_msg,
        .ras_poison_handler = xgpu_nv_ras_poison_handler,
};