/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return the
 * correct value, since it reads RCV_DW0 without checking whether RCV_MSG_VALID
 * has been set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

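/*
 * Check whether the message latched in RCV_DW0 matches the expected event;
 * if it does, acknowledge it to the host and consume it, otherwise return
 * -ENOENT and leave the mailbox untouched.
 */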
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

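/*
 * Transmit a request to the host (PF): wait for any stale TRN_MSG_ACK to
 * drop, write the request and its three data words into TRN_DW0..TRN_DW3,
 * raise TRN_MSG_VALID, then poll for the host's acknowledgement before
 * lowering TRN_MSG_VALID again.
 */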
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; with the
	 * host's RCV_MSG_ACK cleared, the hardware clears the VF's
	 * TRN_MSG_ACK as well. Otherwise xgpu_ai_poll_ack() below would
	 * return immediately on a stale ack.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* poll for READY_TO_ACCESS_GPU when the request needs the host's grant */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

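/*
 * Full GPU access is requested from the host for the init and fini paths;
 * it is handed back again through xgpu_ai_release_full_gpu_access() once the
 * driver no longer needs it.
 */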
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

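/*
 * Deferred work scheduled when the host notifies the VF of a function level
 * reset (FLR): try to take lock_reset so concurrent GPU recovery is blocked,
 * wait (up to AI_MAILBOX_POLL_FLR_TIMEDOUT) for the host to report
 * IDH_FLR_NOTIFICATION_CMPL, then kick off GPU recovery if it is needed.
 */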
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block amdgpu_gpu_recover until the FLR COMPLETE message is
	 * received, otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 *
	 * We unlock lock_reset again below to allow "amdgpu_job_timedout"
	 * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
	 * which means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = 1;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = 0;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

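/*
 * Mailbox receive interrupt handler: peek at the incoming message and, for an
 * FLR notification in SR-IOV runtime mode, schedule the FLR work item above.
 */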
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it for now since the polling thread will handle it; other
	 * messages such as FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

/* VF-to-host mailbox ops plugged into the generic amdgpu SR-IOV layer */
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
};