/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}
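
/*
 * Mailbox doorbell layout as used by this file (inferred from the byte
 * accesses in the helpers above and below, not from a register spec):
 *
 *	TRN control byte: bit 0 = TRN_MSG_VALID (VF has a message pending),
 *			  bit 1 = TRN_MSG_ACK   (host acknowledged it).
 *	RCV control byte: writing 2 sets RCV_MSG_ACK to acknowledge a
 *			  message received from the host.
 *
 * xgpu_ai_mailbox_set_valid() raises/drops TRN_MSG_VALID, and
 * xgpu_ai_mailbox_send_ack() acks an incoming host message.
 */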

/*
 * This peek_msg should *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If it is called outside the IRQ routine, peek_msg is not guaranteed to
 * return the correct value, since it reads RCV_DW0 without checking whether
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}
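
/*
 * A typical VF -> PF exchange built from the helpers above and below
 * (illustrative sketch only, not called anywhere in this file):
 *
 *	xgpu_ai_mailbox_trans_msg(adev, IDH_REQ_GPU_INIT_ACCESS, 0, 0, 0);
 *	r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
 *
 * trans_msg() performs the doorbell handshake (drop TRN_MSG_VALID, wait for
 * any stale TRN_MSG_ACK to clear, write MSGBUF_TRN_DW0..DW3, raise
 * TRN_MSG_VALID, poll for TRN_MSG_ACK), while poll_msg() spins on the RCV
 * message buffer until the expected reply event shows up or the poll
 * timeout expires.
 */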

static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; once the
	 * host's RCV_MSG_ACK is cleared, HW automatically clears the VF's
	 * TRN_MSG_ACK. Otherwise the xgpu_ai_poll_ack() below would return
	 * immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted, wait again!\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

/* Query a PP clock table from the host: send IDH_IRQ_GET_PP_SCLK/MCLK, and
 * on IDH_SUCCESS read the byte offset from MSGBUF_RCV_DW1; the printable
 * clock table string lives at that offset inside the PF2VF reserved region.
 */
static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
{
	int r = 0;
	u32 req, val, size;

	if (!amdgim_is_hwperf(adev) || buf == NULL)
		return -EBADRQC;

	switch (type) {
	case PP_SCLK:
		req = IDH_IRQ_GET_PP_SCLK;
		break;
	case PP_MCLK:
		req = IDH_IRQ_GET_PP_MCLK;
		break;
	default:
		return -EBADRQC;
	}

	mutex_lock(&adev->virt.dpm_mutex);

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
	if (!r && adev->fw_vram_usage.va != NULL) {
		val = RREG32_NO_KIQ(
			SOC15_REG_OFFSET(NBIO, 0,
					 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
		size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
				val), PAGE_SIZE);

		if (size < PAGE_SIZE)
			strcpy(buf, ((char *)adev->virt.fw_reserve.p_pf2vf + val));
		else
			size = 0;

		r = size;
		goto out;
	}

	r = xgpu_ai_poll_msg(adev, IDH_FAIL);
	if (r)
		pr_info("%s DPM request failed",
			(type == PP_SCLK) ? "SCLK" : "MCLK");

out:
	mutex_unlock(&adev->virt.dpm_mutex);
	return r;
}

static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
{
	int r = 0;
	u32 req = IDH_IRQ_FORCE_DPM_LEVEL;

	if (!amdgim_is_hwperf(adev))
		return -EBADRQC;

	mutex_lock(&adev->virt.dpm_mutex);
	xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);

	r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
	if (!r)
		goto out;

	r = xgpu_ai_poll_msg(adev, IDH_FAIL);
	if (!r)
		pr_info("DPM request failed");
	else
		pr_info("Mailbox is broken");

out:
	mutex_unlock(&adev->virt.dpm_mutex);
	return r;
}

static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if request is idh_req_gpu_init_access */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}
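
/*
 * VF FLR (function level reset) handling, as implemented by the code below:
 * the host raises IDH_FLR_NOTIFICATION, xgpu_ai_mailbox_rcv_irq() schedules
 * flr_work, and flr_work then blocks GPU recovery (via lock_reset, when it
 * can take it) while polling up to AI_MAILBOX_POLL_FLR_TIMEDOUT msec for
 * IDH_FLR_NOTIFICATION_CMPL, which signals that the host has finished the
 * FLR. Only afterwards may recovery proceed.
 */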

static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block amdgpu_device_gpu_recover() until the FLR COMPLETE message is
	 * received; otherwise the mailbox msg would be ruined/reset by
	 * the VF FLR.
	 *
	 * We unlock lock_reset, allowing "amdgpu_job_timedout" to run
	 * gpu_recover(), only after FLR_NOTIFICATION_CMPL is received,
	 * which means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = 1;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = 0;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT)
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}
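
/*
 * xgpu_ai_mailbox_rcv_irq() below services the VALID interrupt (a new
 * message from the host); the ack interrupt handler further above is
 * intentionally a no-op. Both are registered against the BIF IH client in
 * xgpu_ai_mailbox_add_irq_id() (src ids 135 and 138 in this file).
 */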

static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here since the polling thread will handle it; other
	 * messages, such as FLR complete, are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
	.get_pp_clk = xgpu_ai_get_pp_clk,
	.force_dpm_level = xgpu_ai_force_dpm_level,
};
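
/*
 * The amdgpu core does not call the functions in this file directly; it goes
 * through the ops table above via adev->virt.ops, e.g. (illustrative only,
 * wrapper name assumed from the amdgpu_virt layer of this era):
 *
 *	if (amdgpu_sriov_vf(adev))
 *		r = amdgpu_virt_request_full_gpu(adev, true);
 *
 * Note that .wait_reset is left NULL here, so callers must cope with that
 * callback being absent on this ASIC.
 */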