/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "vega10/soc15ip.h"
#include "vega10/NBIO/nbio_6_1_offset.h"
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;
	int timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);

	/* Wait for RCV_MSG_VALID to be 0 */
	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (reg & mask) {
		if (timeout <= 0) {
			pr_err("RCV_MSG_VALID is not cleared\n");
			break;
		}
		mdelay(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}
}
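
/*
 * Raise or drop TRN_MSG_VALID in the mailbox control register.  This is
 * the VF->PF handshake bit: it signals the host that the payload placed
 * in the MSGBUF_TRN_DW0..DW3 registers by the caller is ready.
 */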
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
		      reg);
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Didn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}

	return r;
}
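
/*
 * Poll the receive mailbox for a specific event in 5 ms steps, up to
 * AI_MAILBOX_TIMEDOUT ms in total; returns -ETIME if the message never
 * shows up.
 */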
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;

	r = xgpu_ai_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("Didn't get msg:%d from pf.\n", event);
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		r = xgpu_ai_mailbox_rcv_msg(adev, event);
	}

	return r;
}

static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req,
				      u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* wait for the PF's READY_TO_ACCESS_GPU reply for access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
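
/*
 * Request/release exclusive ("full") GPU access from the host.  The VF
 * holds full access around init, fini and reset, where it touches
 * registers that are otherwise owned by the hypervisor.
 */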
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("got ack interrupt, nothing to do\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG becomes IDH_FLR_NOTIFICATION_CMPL */
	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	amdgpu_sriov_gpu_reset(adev, NULL);
}
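
/*
 * VALID_INT_EN gates the "message received" interrupt.  The receive
 * handler below only reacts to IDH_FLR_NOTIFICATION and defers the
 * actual recovery to flr_work, since the reset path cannot run from
 * interrupt context.
 */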
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR is disabled */
	if (amdgpu_lockup_timeout == 0) {
		/* see what event we get */
		r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only handle FLR_NOTIFY now */
		if (!r)
			schedule_work(&adev->virt.flr_work);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
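
/*
 * Virtualization hooks used by the core virt code on AI (Vega10) SR-IOV
 * VFs; wait_reset is not implemented here, so it is left NULL.
 */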
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
};