/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

#include "amdgpu_reset.h"

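/* Acknowledge a message from the host: writing 2 sets bit 1 (assumed to
 * be RCV_MSG_ACK; the bit meaning is inferred from the peek/poll helpers
 * below) in the RCV byte of the BIF mailbox control register.
 */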
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

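/* Raise or drop bit 0 (assumed to be TRN_MSG_VALID) in the TRN byte of
 * the mailbox control register, telling the host whether the contents of
 * MSGBUF_TRN_DW0..DW3 are valid.
 */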
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If it is called outside the IRQ routine, it is not guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful while the host has
 * RCV_MSG_VALID set.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

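/* Check whether RCV_DW0 currently holds the expected event; if so, ack it
 * to the host and return 0, otherwise return -ENOENT.
 */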
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

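/* Busy-wait, in 5 ms steps up to AI_MAILBOX_POLL_ACK_TIMEDOUT ms total,
 * for the host to assert TRN_MSG_ACK (bit 1 of the TRN byte).  Uses
 * mdelay() rather than msleep(), so it is safe in atomic context.
 */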
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	dev_err(adev->dev, "Didn't get TRN_MSG_ACK from pf within %d ms\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

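/* Sleep-wait, in 10 ms steps up to AI_MAILBOX_POLL_MSG_TIMEDOUT ms total,
 * for a specific event to show up in RCV_DW0.  May sleep, so unlike
 * xgpu_ai_poll_ack() it must not be used in atomic context.
 */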
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	dev_err(adev->dev, "Didn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

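/* Send one request to the host: wait for any stale ack to clear, write
 * the request code into the MSGBUF_DATA field of TRN_DW0 and the three
 * payload dwords into TRN_DW1..DW3, raise TRN_MSG_VALID, then poll for
 * the host's ack and drop TRN_MSG_VALID again.
 */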
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req,
				      u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first: this clears the host's RCV_MSG_VALID,
	 * and with RCV_MSG_VALID cleared the hardware automatically clears
	 * the host's RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK.
	 * Otherwise the xgpu_ai_poll_ack() below would return immediately
	 * on a stale ack.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			dev_err_ratelimited(adev->dev, "trn=%x, ACK should not be asserted; waiting again\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
				data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
				data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
				data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start polling for the ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		dev_err(adev->dev, "Didn't get ack from pf, continuing anyway\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start checking for a reply if the request expects one */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
		req == IDH_REQ_GPU_FINI_ACCESS ||
		req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			dev_err(adev->dev, "Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}
		/* Retrieve the checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	} else if (req == IDH_REQ_GPU_INIT_DATA) {
		/* Dummy REQ_GPU_INIT_DATA handling */
		r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
		/* version set to 0 since this is a dummy */
		adev->virt.req_init_data_ver = 0;
	}

	return 0;
}

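/* Ask the host to reset this VF.  The request is retried up to
 * AI_MAILBOX_POLL_MSG_REP_MAX times, presumably because the host may be
 * temporarily unable to service it.
 */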
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "got ack interrupt, nothing to do\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
				(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static void xgpu_ai_ready_to_reset(struct amdgpu_device *adev)
{
	xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}

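/* Poll, in 10 ms steps up to AI_MAILBOX_POLL_FLR_TIMEDOUT ms total, for
 * the host's FLR-complete notification in the mailbox.
 */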
static int xgpu_ai_wait_reset(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
			dev_dbg(adev->dev, "Got AI IDH_FLR_NOTIFICATION_CMPL after %d ms\n", AI_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
			return 0;
		}
		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	dev_dbg(adev->dev, "timed out waiting for AI IDH_FLR_NOTIFICATION_CMPL\n");
	return -ETIME;
}

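/* Deferred handler for a host-initiated function level reset (FLR),
 * scheduled from the mailbox IRQ since recovery may sleep.  It stops data
 * exchange with the host first, then triggers full GPU recovery if
 * recovery is enabled and either no job is in flight or job timeouts are
 * effectively disabled.
 */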
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	amdgpu_virt_fini_data_exchange(adev);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
			adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

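/* IRQ handler for VALID interrupts from the host.  Only two messages need
 * action here: IDH_FLR_NOTIFICATION schedules flr_work on the reset
 * domain, and IDH_QUERY_ALIVE is acked immediately.
 */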
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by a kernel polling thread, so the
	 * IRQ handler can ignore it here; other messages such as FLR
	 * complete are likewise not handled here.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

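/* Notify the host that this VF consumed RAS-poisoned data; the block
 * argument is unused on this generation.
 */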
static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev,
					enum amdgpu_ras_block block)
{
	xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
}

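/* Return true if the host has flagged a RAS error, or if the mailbox
 * reads back all ones (presumably register access was lost, e.g. while
 * the host resets the device).
 */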
static bool xgpu_ai_rcvd_ras_intr(struct amdgpu_device *adev)
{
	enum idh_event msg = xgpu_ai_mailbox_peek_msg(adev);

	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

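/* VF-to-host mailbox callbacks for SR-IOV on Vega10-class ("AI") parts;
 * the amdgpu_virt layer dispatches through this table.
 */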
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.ready_to_reset = xgpu_ai_ready_to_reset,
	.wait_reset = xgpu_ai_wait_reset,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
	.req_init_data  = xgpu_ai_request_init_data,
	.ras_poison_handler = xgpu_ai_ras_poison_handler,
	.rcvd_ras_intr = xgpu_ai_rcvd_ras_intr,
};
429