xref: /linux/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c (revision 7f4f3b14e8079ecde096bd734af10e30d40c27b7)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

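/*
 * The BIF mailbox control register is accessed one byte at a time: bit 0 of
 * the TRN byte is TRN_MSG_VALID (set by the VF) and bit 1 is TRN_MSG_ACK
 * (set by the host); the RCV byte carries the mirror-image RCV_MSG_VALID and
 * RCV_MSG_ACK bits.  This layout is inferred from the accessors below.
 */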
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside of the IRQ routine, peek_msg cannot be guaranteed to
 * return the correct value, since RCV_DW0 is only meaningful while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}


static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	int r = 0;
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg == IDH_FAIL)
		r = -EINVAL;
	else if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return r;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

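/*
 * Busy-wait for the host to assert TRN_MSG_ACK, checking every 5 ms for up to
 * NV_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */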
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	dev_err(adev->dev, "Didn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

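/*
 * Poll the RCV mailbox every 10 ms for the expected event, giving up after
 * NV_MAILBOX_POLL_MSG_TIMEDOUT ms.
 */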
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r) {
			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
			return 0;
		}

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	dev_dbg(adev->dev, "nv_poll_msg timed out\n");

	return -ETIME;
}

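/*
 * Send one VF->PF message: wait for any stale ack to clear, write the request
 * and up to three data words into TRN_DW0..DW3, raise TRN_MSG_VALID, then
 * poll for the host's ack and drop TRN_MSG_VALID again.
 */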
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * clearing TRN_MSG_VALID clears the host's RCV_MSG_VALID, and with
	 * RCV_MSG_VALID cleared the hw automatically clears the host's
	 * RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK.  Otherwise
	 * the xgpu_nv_poll_ack() below would return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			dev_err_ratelimited(adev->dev, "trn=%x ACK should not be asserted, waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		dev_err(adev->dev, "Didn't get ack from pf, continuing\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

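/*
 * Send a request to the PF and, for requests that have a defined reply event,
 * poll for that reply, retrying the whole exchange up to five times.  Extra
 * data the host returns in RCV_DW1/RCV_DW2 (init data version, fw reserve
 * checksum key) is captured here as a side effect.
 */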
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
			enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:
	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	case IDH_RAS_POISON:
		if (data1 != 0)
			event = IDH_RAS_POISON_READY;
		break;
	case IDH_REQ_RAS_ERROR_COUNT:
		event = IDH_RAS_ERROR_COUNT_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 5)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				dev_err(adev->dev, "Didn't get msg %d from pf, error=%d\n", event, r);
				return r;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	return xgpu_nv_send_access_requests_with_param(adev,
						req, 0, 0, 0);
}

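/* Keep resending the reset request until the host accepts it, bounded by
 * NV_MAILBOX_POLL_MSG_REP_MAX attempts.
 */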
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "get ack intr and do nothing.\n");
	return 0;
}

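/*
 * Bit 1 of MAILBOX_INT_CNTL gates the ack interrupt and bit 0 (toggled in
 * xgpu_nv_set_mailbox_rcv_irq() below) gates the incoming-message interrupt.
 * The mapping is inferred from these two helpers.
 */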
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

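/*
 * Notify the host that this VF is ready for the pending FLR (wired up as the
 * .ready_to_reset virt op below).
 */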
static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}

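/*
 * Poll for IDH_FLR_NOTIFICATION_CMPL from the host, checking every 10 ms for
 * up to NV_MAILBOX_POLL_FLR_TIMEDOUT ms.
 */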
static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
			dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
			return 0;
		}
		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	dev_dbg(adev->dev, "timed out waiting for NV IDH_FLR_NOTIFICATION_CMPL\n");
	return -ETIME;
}

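/*
 * Worker scheduled from the mailbox IRQ when the host announces an FLR: stop
 * the VF/PF data exchange and, if per-ring TDR would not recover on its own,
 * kick a full GPU recovery flagged as a host-initiated FLR.
 */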
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	amdgpu_virt_fini_data_exchange(adev);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

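/*
 * Incoming-message interrupt handler: only IDH_FLR_NOTIFICATION is acted on
 * here, by scheduling flr_work on the reset domain.
 */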
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
				   &adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
		/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
		 * can ignore it here since the polling thread will handle it;
		 * other messages such as FLR complete are not handled here
		 * either.
		 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

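/*
 * Register the two BIF mailbox interrupt sources: src_id 135 is registered
 * for the rcv (incoming message) interrupt and 138 for the ack interrupt.
 */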
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

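/*
 * Forward a RAS poison event to the host.  On older UMC IPs (before 12.0.0)
 * the legacy parameterless IDH_RAS_POISON message is kept; newer hosts get
 * the affected RAS block in data1, with the data exchange stopped first.
 */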
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
	} else {
		amdgpu_virt_fini_data_exchange(adev);
		xgpu_nv_send_access_requests_with_param(adev,
					IDH_RAS_POISON,	block, 0, 0);
	}
}

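/*
 * A reading of all ones is treated like IDH_RAS_ERROR_DETECTED, presumably
 * because register reads can return 0xFFFFFFFF while the device is in a RAS
 * error state.
 */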
static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
{
	enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);

	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
	.req_init_data  = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.ready_to_reset = xgpu_nv_ready_to_reset,
	.wait_reset = xgpu_nv_wait_reset,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
	.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
	.req_ras_err_count = xgpu_nv_req_ras_err_count,
};