xref: /linux/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c (revision 75372d75a4e23783583998ed99d5009d555850da)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include "amdgpu.h"
25 #include "nbio/nbio_2_3_offset.h"
26 #include "nbio/nbio_2_3_sh_mask.h"
27 #include "gc/gc_10_1_0_offset.h"
28 #include "gc/gc_10_1_0_sh_mask.h"
29 #include "soc15.h"
30 #include "navi10_ih.h"
31 #include "soc15_common.h"
32 #include "mxgpu_nv.h"
33 
34 #include "amdgpu_reset.h"
35 
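/* Acknowledge the message latched in the VF's RCV mailbox by writing the
 * ack bit (value 2) of the RCV mailbox control byte.
 */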
36 static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
37 {
38 	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
39 }
40 
41 static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
42 {
43 	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
44 }
45 
46 /*
47  * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
48  * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already been
49  * set to 1 by the host.
50  *
51  * If called outside the IRQ routine, peek_msg cannot be guaranteed to return the
52  * correct value, since it may read RCV_DW0 while RCV_MSG_VALID has not been set
53  * by the host.
54  */
55 static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
56 {
57 	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
58 }
59 
60 
61 static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
62 				   enum idh_event event)
63 {
64 	int r = 0;
65 	u32 reg;
66 
67 	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
68 	if (reg == IDH_FAIL)
69 		r = -EINVAL;
70 	else if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
71 		r = -ENODEV;
72 	else if (reg != event)
73 		return -ENOENT;
74 
75 	xgpu_nv_mailbox_send_ack(adev);
76 
77 	return r;
78 }
79 
80 static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
81 {
82 	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
83 }
84 
85 static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
86 {
87 	int timeout  = NV_MAILBOX_POLL_ACK_TIMEDOUT;
88 	u8 reg;
89 
90 	do {
91 		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
92 		if (reg & 2)
93 			return 0;
94 
95 		mdelay(5);
96 		timeout -= 5;
97 	} while (timeout > 1);
98 
99 	dev_err(adev->dev,
100 		"Doesn't get TRN_MSG_ACK from pf in %d msec\n",
101 		NV_MAILBOX_POLL_ACK_TIMEDOUT);
102 
103 	return -ETIME;
104 }
105 
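/* Poll the RCV mailbox for a specific event for up to
 * NV_MAILBOX_POLL_MSG_TIMEDOUT ms.  If the host reports an unrecoverable
 * error instead, mark the VF as RMA and abort the poll.
 */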
106 static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
107 {
108 	int r;
109 	uint64_t timeout, now;
110 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
111 
112 	now = (uint64_t)ktime_to_ms(ktime_get());
113 	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
114 
115 	do {
116 		r = xgpu_nv_mailbox_rcv_msg(adev, event);
117 		if (!r) {
118 			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
119 					event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
120 			return 0;
121 		} else if (r == -ENODEV) {
122 			if (!amdgpu_ras_is_rma(adev)) {
123 				ras->is_rma = true;
124 				dev_err(adev->dev, "VF is in an unrecoverable state. "
125 						"Runtime Services are halted.\n");
126 			}
127 			return r;
128 		}
129 
130 		msleep(10);
131 		now = (uint64_t)ktime_to_ms(ktime_get());
132 	} while (timeout > now);
133 
134 	dev_dbg(adev->dev, "nv_poll_msg timed out\n");
135 
136 	return -ETIME;
137 }
138 
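/* Transmit one request to the host: make sure no stale TRN_MSG_ACK is
 * pending, write the request and up to three data words into TRN_DW0..DW3,
 * raise TRN_MSG_VALID, then wait for the host's ack before clearing
 * TRN_MSG_VALID again.
 */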
139 static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
140 	      enum idh_request req, u32 data1, u32 data2, u32 data3)
141 {
142 	int r;
143 	uint8_t trn;
144 
145 	/* IMPORTANT:
146 	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; once the host's
147 	 * RCV_MSG_ACK is cleared, hardware automatically clears the VF's
148 	 * TRN_MSG_ACK as well.  Without this, the xgpu_nv_poll_ack() below
149 	 * would return immediately on a stale ack.
150 	 */
151 	do {
152 		xgpu_nv_mailbox_set_valid(adev, false);
153 		trn = xgpu_nv_peek_ack(adev);
154 		if (trn) {
155 			dev_err_ratelimited(adev->dev, "trn=%x ACK should not be asserted! waiting again!\n", trn);
156 			msleep(1);
157 		}
158 	} while (trn);
159 
160 	dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
161 	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
162 	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
163 	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
164 	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
165 	xgpu_nv_mailbox_set_valid(adev, true);
166 
167 	/* start to poll ack */
168 	r = xgpu_nv_poll_ack(adev);
169 	if (r)
170 		dev_err(adev->dev, "Doesn't get ack from pf, continue\n");
171 
172 	xgpu_nv_mailbox_set_valid(adev, false);
173 }
174 
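/* Send a request to the host and, for requests that have a defined reply
 * event, poll for that reply (up to five attempts in total).  Replies for
 * init-data and init/reset access requests carry extra payload (init data
 * header, fw_reserve checksum) that is read back from the RCV mailbox.
 * Serialized by virt->access_req_mutex.
 */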
175 static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
176 			enum idh_request req, u32 data1, u32 data2, u32 data3)
177 {
178 	struct amdgpu_virt *virt = &adev->virt;
179 	int r = 0, retry = 1;
180 	enum idh_event event = -1;
181 
182 	mutex_lock(&virt->access_req_mutex);
183 send_request:
184 
185 	if (amdgpu_ras_is_rma(adev)) {
186 		r = -ENODEV;
187 		goto out;
188 	}
189 
190 	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
191 
192 	switch (req) {
193 	case IDH_REQ_GPU_INIT_ACCESS:
194 	case IDH_REQ_GPU_FINI_ACCESS:
195 	case IDH_REQ_GPU_RESET_ACCESS:
196 		event = IDH_READY_TO_ACCESS_GPU;
197 		break;
198 	case IDH_REQ_GPU_INIT_DATA:
199 		event = IDH_REQ_GPU_INIT_DATA_READY;
200 		break;
201 	case IDH_RAS_POISON:
202 		if (data1 != 0)
203 			event = IDH_RAS_POISON_READY;
204 		break;
205 	case IDH_REQ_RAS_ERROR_COUNT:
206 		event = IDH_RAS_ERROR_COUNT_READY;
207 		break;
208 	case IDH_REQ_RAS_CPER_DUMP:
209 		event = IDH_RAS_CPER_DUMP_READY;
210 		break;
211 	case IDH_REQ_RAS_CHK_CRITI:
212 		event = IDH_REQ_RAS_CHK_CRITI_READY;
213 		break;
214 	case IDH_REQ_RAS_REMOTE_CMD:
215 		event = IDH_REQ_RAS_REMOTE_CMD_READY;
216 		break;
217 	default:
218 		break;
219 	}
220 
221 	if (event != -1) {
222 		r = xgpu_nv_poll_msg(adev, event);
223 		if (r) {
224 			if (retry++ < 5)
225 				goto send_request;
226 
227 			if (req != IDH_REQ_GPU_INIT_DATA) {
228 				dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r);
229 				goto out;
230 			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
231 				adev->virt.req_init_data_ver = 0;
232 		} else {
233 			if (req == IDH_REQ_GPU_INIT_DATA) {
234 				switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) {
235 				case GPU_CRIT_REGION_V2:
236 					adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2;
237 					adev->virt.init_data_header.offset =
238 						RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
239 					adev->virt.init_data_header.size_kb =
240 						RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3);
241 					break;
242 				default:
243 					adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1;
244 					adev->virt.init_data_header.offset = -1;
245 					adev->virt.init_data_header.size_kb = 0;
246 					break;
247 				}
248 			}
249 		}
250 
251 		/* Retrieve checksum from mailbox2 */
252 		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
253 			adev->virt.fw_reserve.checksum_key =
254 				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
255 		}
256 	}
257 
258 out:
259 	mutex_unlock(&virt->access_req_mutex);
260 
261 	return r;
262 }
263 
264 static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
265 					enum idh_request req)
266 {
267 	return xgpu_nv_send_access_requests_with_param(adev,
268 						req, 0, 0, 0);
269 }
270 
271 static int xgpu_nv_request_reset(struct amdgpu_device *adev)
272 {
273 	int ret, i = 0;
274 
275 	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
276 		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
277 		if (!ret)
278 			break;
279 		i++;
280 	}
281 
282 	return ret;
283 }
284 
285 static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
286 					   bool init)
287 {
288 	enum idh_request req;
289 
290 	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
291 	return xgpu_nv_send_access_requests(adev, req);
292 }
293 
294 static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
295 					   bool init)
296 {
297 	enum idh_request req;
298 	int r = 0;
299 
300 	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
301 	r = xgpu_nv_send_access_requests(adev, req);
302 
303 	return r;
304 }
305 
306 static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
307 {
308 	return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA,
309 			0, GPU_CRIT_REGION_V2, 0);
310 }
311 
312 static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
313 					struct amdgpu_irq_src *source,
314 					struct amdgpu_iv_entry *entry)
315 {
316 	dev_dbg(adev->dev, "get ack intr and do nothing.\n");
317 	return 0;
318 }
319 
320 static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
321 					struct amdgpu_irq_src *source,
322 					unsigned type,
323 					enum amdgpu_interrupt_state state)
324 {
325 	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
326 
327 	if (state == AMDGPU_IRQ_STATE_ENABLE)
328 		tmp |= 2;
329 	else
330 		tmp &= ~2;
331 
332 	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
333 
334 	return 0;
335 }
336 
337 static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
338 {
339 	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
340 }
341 
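/* Wait up to NV_MAILBOX_POLL_FLR_TIMEDOUT ms for the host to post
 * IDH_FLR_NOTIFICATION_CMPL, i.e. for the function-level reset to finish.
 */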
342 static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
343 {
344 	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
345 	do {
346 		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
347 			dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
348 			return 0;
349 		}
350 		msleep(10);
351 		timeout -= 10;
352 	} while (timeout > 1);
353 
354 	dev_dbg(adev->dev, "waiting NV IDH_FLR_NOTIFICATION_CMPL timeout\n");
355 	return -ETIME;
356 }
357 
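/* Worker scheduled from the mailbox IRQ when the host signals an FLR or an
 * unrecoverable error: stop data exchange with the host and, if no job-based
 * TDR is expected to handle the failure (no job running or timeouts are
 * disabled), trigger full GPU recovery with the AMDGPU_HOST_FLR flag set.
 */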
358 static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
359 {
360 	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
361 	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
362 	struct amdgpu_reset_context reset_context = { 0 };
363 
364 	amdgpu_virt_fini_data_exchange(adev);
365 
366 	/* Trigger recovery for world switch failure if no TDR */
367 	if (amdgpu_device_should_recover_gpu(adev)
368 		&& (!amdgpu_device_has_job_running(adev) ||
369 		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
370 		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
371 		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
372 		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
373 
374 		reset_context.method = AMD_RESET_METHOD_NONE;
375 		reset_context.reset_req_dev = adev;
376 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
377 		set_bit(AMDGPU_HOST_FLR, &reset_context.flags);
378 
379 		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
380 	}
381 }
382 
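/**
 * xgpu_nv_mailbox_req_bad_pages_work - Request the host's updated bad page list
 * @work: pointer to the work_struct
 *
 * Stops data exchange with the host and asks it to publish fresh bad page
 * information.  Skipped if a reset is currently holding the reset domain.
 */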
383 static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
384 {
385 	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
386 	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
387 
388 	if (down_read_trylock(&adev->reset_domain->sem)) {
389 		amdgpu_virt_fini_data_exchange(adev);
390 		amdgpu_virt_request_bad_pages(adev);
391 		up_read(&adev->reset_domain->sem);
392 	}
393 }
394 
395 /**
396  * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
397  * @work: pointer to the work_struct
398  *
399  * This work handler is triggered when bad pages are ready, and it reinitializes
400  * the data exchange region to retrieve updated bad page information from the host.
401  */
402 static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
403 {
404 	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
405 	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
406 
407 	if (down_read_trylock(&adev->reset_domain->sem)) {
408 		amdgpu_virt_fini_data_exchange(adev);
409 		amdgpu_virt_init_data_exchange(adev);
410 		up_read(&adev->reset_domain->sem);
411 	}
412 }
413 
414 static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
415 				       struct amdgpu_irq_src *src,
416 				       unsigned type,
417 				       enum amdgpu_interrupt_state state)
418 {
419 	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
420 
421 	if (state == AMDGPU_IRQ_STATE_ENABLE)
422 		tmp |= 1;
423 	else
424 		tmp &= ~1;
425 
426 	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
427 
428 	return 0;
429 }
430 
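/* Mailbox receive interrupt handler: peek the pending IDH event and either
 * ack it and schedule the matching worker, or leave it for the polling
 * paths that consume it.
 */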
431 static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
432 				   struct amdgpu_irq_src *source,
433 				   struct amdgpu_iv_entry *entry)
434 {
435 	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
436 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
437 
438 	switch (event) {
439 	case IDH_RAS_BAD_PAGES_READY:
440 		xgpu_nv_mailbox_send_ack(adev);
441 		if (amdgpu_sriov_runtime(adev))
442 			schedule_work(&adev->virt.handle_bad_pages_work);
443 		break;
444 	case IDH_RAS_BAD_PAGES_NOTIFICATION:
445 		xgpu_nv_mailbox_send_ack(adev);
446 		if (amdgpu_sriov_runtime(adev))
447 			schedule_work(&adev->virt.req_bad_pages_work);
448 		break;
449 	case IDH_UNRECOV_ERR_NOTIFICATION:
450 		xgpu_nv_mailbox_send_ack(adev);
451 		if (!amdgpu_ras_is_rma(adev)) {
452 			ras->is_rma = true;
453 			dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
454 		}
455 
456 		if (amdgpu_sriov_runtime(adev))
457 			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
458 						&adev->virt.flr_work),
459 					"Failed to queue work! at %s",
460 					__func__);
461 		break;
462 	case IDH_FLR_NOTIFICATION:
463 		if (amdgpu_sriov_runtime(adev))
464 			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
465 				   &adev->virt.flr_work),
466 				  "Failed to queue work! at %s",
467 				  __func__);
468 		break;
469 		/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
470 		 * safely ignore it here since the polling thread will handle it;
471 		 * other messages such as FLR complete are likewise not handled here.
472 		 */
473 	case IDH_CLR_MSG_BUF:
474 	case IDH_FLR_NOTIFICATION_CMPL:
475 	case IDH_READY_TO_ACCESS_GPU:
476 	default:
477 		break;
478 	}
479 
480 	return 0;
481 }
482 
483 static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
484 	.set = xgpu_nv_set_mailbox_ack_irq,
485 	.process = xgpu_nv_mailbox_ack_irq,
486 };
487 
488 static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
489 	.set = xgpu_nv_set_mailbox_rcv_irq,
490 	.process = xgpu_nv_mailbox_rcv_irq,
491 };
492 
493 void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
494 {
495 	adev->virt.ack_irq.num_types = 1;
496 	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
497 	adev->virt.rcv_irq.num_types = 1;
498 	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
499 }
500 
501 int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
502 {
503 	int r;
504 
505 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
506 	if (r)
507 		return r;
508 
509 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
510 	if (r) {
511 		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
512 		return r;
513 	}
514 
515 	return 0;
516 }
517 
518 int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
519 {
520 	int r;
521 
522 	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
523 	if (r)
524 		return r;
525 	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
526 	if (r) {
527 		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
528 		return r;
529 	}
530 
531 	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
532 	INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
533 	INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);
534 
535 	return 0;
536 }
537 
538 void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
539 {
540 	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
541 	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
542 }
543 
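/* Forward a RAS poison consumption event to the host.  On UMC IP v12.0.0
 * and newer the affected block is passed along and data exchange is
 * stopped first.
 */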
544 static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
545 		enum amdgpu_ras_block block)
546 {
547 	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
548 		xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
549 	} else {
550 		amdgpu_virt_fini_data_exchange(adev);
551 		xgpu_nv_send_access_requests_with_param(adev,
552 					IDH_RAS_POISON,	block, 0, 0);
553 	}
554 }
555 
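/* Check whether the host has posted a RAS error notification.  An all-ones
 * read is also treated as an error indication (e.g. the register space is
 * no longer accessible).
 */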
556 static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
557 {
558 	enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);
559 
560 	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
561 }
562 
563 static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
564 {
565 	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
566 }
567 
568 static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
569 {
570 	uint32_t vf_rptr_hi, vf_rptr_lo;
571 
572 	vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
573 	vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
574 	return xgpu_nv_send_access_requests_with_param(
575 		adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
576 }
577 
578 static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
579 {
580 	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
581 }
582 
583 static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
584 {
585 	uint32_t addr_hi, addr_lo;
586 
587 	addr_hi = (uint32_t)(addr >> 32);
588 	addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
589 	return xgpu_nv_send_access_requests_with_param(
590 		adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
591 }
592 
593 static int xgpu_nv_req_remote_ras_cmd(struct amdgpu_device *adev,
594 		u32 param1, u32 param2, u32 param3)
595 {
596 	return xgpu_nv_send_access_requests_with_param(
597 		adev, IDH_REQ_RAS_REMOTE_CMD, param1, param2, param3);
598 }
599 
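/* VF-side virtualization callbacks for Navi-family SR-IOV, wired up through
 * adev->virt.ops.
 */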
600 const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
601 	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
602 	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
603 	.req_init_data  = xgpu_nv_request_init_data,
604 	.reset_gpu = xgpu_nv_request_reset,
605 	.ready_to_reset = xgpu_nv_ready_to_reset,
606 	.wait_reset = xgpu_nv_wait_reset,
607 	.trans_msg = xgpu_nv_mailbox_trans_msg,
608 	.ras_poison_handler = xgpu_nv_ras_poison_handler,
609 	.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
610 	.req_ras_err_count = xgpu_nv_req_ras_err_count,
611 	.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
612 	.req_bad_pages = xgpu_nv_req_ras_bad_pages,
613 	.req_ras_chk_criti = xgpu_nv_check_vf_critical_region,
614 	.req_remote_ras_cmd = xgpu_nv_req_remote_ras_cmd,
615 };
616