/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If it is called outside the IRQ routine, peek_msg is not guaranteed to
 * return the correct value, since it may not read RCV_DW0 at a point where
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * clear TRN_MSG_VALID to clear host's RCV_MSG_ACK; with host's
	 * RCV_MSG_ACK cleared, hw automatically clears VF's TRN_MSG_ACK.
	 * Otherwise the xgpu_ai_poll_ack() below would return immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* for gpu access requests, wait for the host's READY_TO_ACCESS_GPU reply */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
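
/* Request full (exclusive) GPU access from the host (PF) for the init or
 * fini phase; the host signals readiness with READY_TO_ACCESS_GPU.
 */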
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block amdgpu_gpu_recover() until the FLR COMPLETE message is
	 * received, otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 *
	 * We can unlock lock_reset to allow "amdgpu_job_timedout"
	 * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
	 * which means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = true;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = false;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here since the polling thread will handle it;
	 * other messages such as FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
};