/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

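/*
 * Acknowledge the message currently held in the receive mailbox. Judging
 * from the usage here and in xgpu_nv_poll_ack() below, bit 1 of the
 * byte-wide control registers is the ACK bit and bit 0 the VALID bit;
 * see mxgpu_nv.h for the byte offsets.
 */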
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

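/* Raise or drop TRN_MSG_VALID to tell the host whether a message is staged. */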
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful while the host has
 * RCV_MSG_VALID set.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

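/*
 * Check whether the message in RCV_DW0 matches the expected event and, if
 * so, acknowledge it to the host.
 */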
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

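/* Return the current state of TRN_MSG_ACK without waiting. */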
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

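/*
 * Busy-wait for the host to assert TRN_MSG_ACK, giving up after
 * NV_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */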
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Did not get TRN_MSG_ACK from pf within %d ms\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

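/*
 * Poll for an expected message from the host, sleeping 10 ms between
 * checks, until NV_MAILBOX_POLL_MSG_TIMEDOUT ms have elapsed.
 */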
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	return -ETIME;
}

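/*
 * Send one request to the host: stage req plus up to three data words in
 * TRN_DW0..TRN_DW3, raise TRN_MSG_VALID, then poll for the host's ack.
 */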
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; with that
	 * cleared, hw automatically clears the host's RCV_MSG_ACK, which in
	 * turn clears the VF's TRN_MSG_ACK. Otherwise xgpu_nv_poll_ack()
	 * below would return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted, waiting again\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Did not get ack from pf, continuing\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

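/*
 * Send an access request and, for requests that have a defined reply,
 * poll for the matching event from the host. The handshake is retried
 * once before giving up.
 */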
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:
	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 2)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Did not get msg %d from pf, error=%d\n", event, r);
				return r;
			} else {
				/* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
			}
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

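/*
 * Ask the host for a GPU reset, retrying the whole handshake up to
 * NV_MAILBOX_POLL_MSG_REP_MAX times.
 */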
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

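/* Request exclusive (full) GPU access from the host for the init or fini path. */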
static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

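/* Release exclusive GPU access back to the host after init or fini. */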
static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

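/*
 * Ask the host for init data; the reply version is recorded in
 * xgpu_nv_send_access_requests().
 */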
static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

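/* The ack interrupt itself needs no handling; acks are consumed by polling. */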
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("got ack interrupt, nothing to do\n");
	return 0;
}

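/* Enable or disable the mailbox ack interrupt (bit 1 of MAILBOX_INT_CNTL). */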
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

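/*
 * Worker for a function-level reset (FLR) notification from the host:
 * tell the host the VF is ready to be reset, wait for FLR completion,
 * then trigger GPU recovery if needed.
 */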
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	/* block amdgpu_gpu_recover until msg FLR COMPLETE is received,
	 * otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 */
	if (!down_write_trylock(&adev->reset_sem))
		return;

	amdgpu_virt_fini_data_exchange(adev);
	atomic_set(&adev->in_gpu_reset, 1);

	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

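/* Enable or disable the mailbox message-received interrupt (bit 0 of MAILBOX_INT_CNTL). */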
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

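/* Message-received interrupt handler: kick off FLR work on an FLR notification. */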
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by the kernel polling thread, so
	 * the IRQ handler can ignore it for now; other messages such as
	 * FLR complete are likewise not handled here.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

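/* Hook the mailbox ack/rcv interrupt sources into adev->virt. */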
void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

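/*
 * Register the BIF mailbox interrupt sources: src id 135 for the
 * message-received interrupt and 138 for the ack interrupt.
 */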
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

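/* Enable both mailbox interrupts and set up the FLR worker. */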
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

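/* Disable both mailbox interrupts. */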
void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

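/* Mailbox-based virtualization ops exposed to the amdgpu SR-IOV core. */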
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
	.req_init_data	= xgpu_nv_request_init_data,
	.reset_gpu	= xgpu_nv_request_reset,
	.wait_reset	= NULL,
	.trans_msg	= xgpu_nv_mailbox_trans_msg,
};