xref: /linux/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c (revision face6a3615a649456eb4549f6d474221d877d604)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

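/*
 * VF <-> PF mailbox overview, as used throughout this file: the VF writes a
 * request into MSGBUF_TRN_DW0..DW3 and raises TRN_MSG_VALID (bit 0 of the TRN
 * control byte); the host acks it via RCV_MSG_ACK, which the VF observes as
 * bit 1 of that same byte.  Messages from the host arrive in
 * MSGBUF_RCV_DW0..DW3 and are acknowledged by writing bit 1 of the RCV
 * control byte.  The byte offsets and bit positions follow the NV_MAIBOX_*
 * definitions in mxgpu_nv.h.
 */
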
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return the
 * correct value, since it reads RCV_DW0 without checking whether
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}


static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	int r = 0;
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg == IDH_FAIL)
		r = -EINVAL;
	else if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
		r = -ENODEV;
	else if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return r;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	dev_err(adev->dev, "Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r) {
			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
					event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
			return 0;
		} else if (r == -ENODEV) {
			if (!amdgpu_ras_is_rma(adev)) {
				ras->is_rma = true;
				dev_err(adev->dev, "VF is in an unrecoverable state. "
						"Runtime Services are halted.\n");
			}
			return r;
		}

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	dev_dbg(adev->dev, "nv_poll_msg timed out\n");

	return -ETIME;
}

static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first: with TRN_MSG_VALID cleared, the hardware
	 * automatically clears the host's RCV_MSG_ACK, which in turn clears
	 * the VF's TRN_MSG_ACK.  Otherwise the xgpu_nv_poll_ack() below would
	 * return immediately on the stale ack.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			dev_err_ratelimited(adev->dev, "trn=%x ACK should not be asserted! Waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		dev_err(adev->dev, "Doesn't get ack from pf, continue\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

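/*
 * Send a request to the host and, for request types that expect a reply,
 * poll the mailbox for the matching IDH event.  The mapping from request to
 * expected event mirrors the switch statement below; requests with no entry
 * there are fire-and-forget.
 */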
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
			enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:

	if (amdgpu_ras_is_rma(adev))
		return -ENODEV;

	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	case IDH_RAS_POISON:
		if (data1 != 0)
			event = IDH_RAS_POISON_READY;
		break;
	case IDH_REQ_RAS_ERROR_COUNT:
		event = IDH_RAS_ERROR_COUNT_READY;
		break;
	case IDH_REQ_RAS_CPER_DUMP:
		event = IDH_RAS_CPER_DUMP_READY;
		break;
	case IDH_REQ_RAS_CHK_CRITI:
		event = IDH_REQ_RAS_CHK_CRITI_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 5)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r);
				return r;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) {
				case GPU_CRIT_REGION_V2:
					adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2;
					adev->virt.init_data_header.offset =
						RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
					adev->virt.init_data_header.size_kb =
						RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3);
					break;
				default:
					adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1;
					adev->virt.init_data_header.offset = -1;
					adev->virt.init_data_header.size_kb = 0;
					break;
				}
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	return xgpu_nv_send_access_requests_with_param(adev,
						req, 0, 0, 0);
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

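/*
 * Ask the host for the init data region.  data2 = GPU_CRIT_REGION_V2
 * advertises that this VF understands the V2 critical-region layout; the
 * reply parsing in xgpu_nv_send_access_requests_with_param() falls back to
 * GPU_CRIT_REGION_V1 when the host does not echo V2 back.
 */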
static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA,
			0, GPU_CRIT_REGION_V2, 0);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}

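/*
 * Poll for IDH_FLR_NOTIFICATION_CMPL from the host rather than waiting for
 * the mailbox interrupt; gives up after NV_MAILBOX_POLL_FLR_TIMEDOUT ms.
 */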
static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
			dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
			return 0;
		}
		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	dev_dbg(adev->dev, "waiting for NV IDH_FLR_NOTIFICATION_CMPL timed out\n");
	return -ETIME;
}

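/*
 * FLR (function level reset) work item, scheduled from the mailbox IRQ
 * handler.  Data exchange with the host is shut down first; full GPU
 * recovery is only triggered when no TDR is pending (no job running, or all
 * engine timeouts set to MAX_SCHEDULE_TIMEOUT).
 */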
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	struct amdgpu_reset_context reset_context = { 0 };

	amdgpu_virt_fini_data_exchange(adev);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

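/**
 * xgpu_nv_mailbox_req_bad_pages_work - Request bad page information from the host
 * @work: pointer to the work_struct
 *
 * Scheduled when the host signals IDH_RAS_BAD_PAGES_NOTIFICATION; tears down
 * the data exchange region and asks the host for the updated bad page list,
 * skipping the request if a reset currently holds the domain semaphore.
 */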
static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	if (down_read_trylock(&adev->reset_domain->sem)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_request_bad_pages(adev);
		up_read(&adev->reset_domain->sem);
	}
}

/**
 * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
 * @work: pointer to the work_struct
 *
 * This work handler is triggered when bad pages are ready, and it reinitializes
 * the data exchange region to retrieve updated bad page information from the host.
 */
static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	if (down_read_trylock(&adev->reset_domain->sem)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_init_data_exchange(adev);
		up_read(&adev->reset_domain->sem);
	}
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

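/*
 * Mailbox receive interrupt handler: peek the pending host message and
 * dispatch it.  Bad-page and unrecoverable-error notifications are acked
 * here and deferred to work items; FLR notifications schedule flr_work on
 * the reset domain.
 */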
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	switch (event) {
	case IDH_RAS_BAD_PAGES_READY:
		xgpu_nv_mailbox_send_ack(adev);
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.handle_bad_pages_work);
		break;
	case IDH_RAS_BAD_PAGES_NOTIFICATION:
		xgpu_nv_mailbox_send_ack(adev);
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.req_bad_pages_work);
		break;
	case IDH_UNRECOV_ERR_NOTIFICATION:
		xgpu_nv_mailbox_send_ack(adev);
		if (!amdgpu_ras_is_rma(adev)) {
			ras->is_rma = true;
			dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
		}

		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
						&adev->virt.flr_work),
					"Failed to queue work! at %s",
					__func__);
		break;
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
				   &adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
		/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
		 * handler can safely ignore it here; the polling thread will
		 * consume it.  Other messages, such as FLR complete, are not
		 * handled here either.
		 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

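/*
 * Register the two BIF mailbox interrupt sources: src id 135 raises the
 * receive interrupt and src id 138 the ack interrupt; both are routed
 * through SOC15_IH_CLIENTID_BIF.
 */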
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
	INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
	INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

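/*
 * Forward a RAS poison consumption event to the host.  UMC IP versions
 * before 12.0.0 use the legacy parameterless IDH_RAS_POISON request; newer
 * versions shut down data exchange first and pass the affected RAS block as
 * data1.
 */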
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
	} else {
		amdgpu_virt_fini_data_exchange(adev);
		xgpu_nv_send_access_requests_with_param(adev,
					IDH_RAS_POISON, block, 0, 0);
	}
}

static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
{
	enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);

	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}

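/*
 * Request a CPER dump from the host.  The VF's 64-bit read pointer is split
 * into high and low dwords so it fits the two u32 mailbox data slots.
 */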
static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
{
	uint32_t vf_rptr_hi, vf_rptr_lo;

	vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
	vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
	return xgpu_nv_send_access_requests_with_param(
		adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}

static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}

static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
{
	uint32_t addr_hi, addr_lo;

	addr_hi = (uint32_t)(addr >> 32);
	addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
	return xgpu_nv_send_access_requests_with_param(
		adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
}

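/* Mailbox-backed virtualization ops exported to the amdgpu virt layer */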
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.req_init_data = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.ready_to_reset = xgpu_nv_ready_to_reset,
	.wait_reset = xgpu_nv_wait_reset,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
	.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
	.req_ras_err_count = xgpu_nv_req_ras_err_count,
	.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
	.req_bad_pages = xgpu_nv_req_ras_bad_pages,
	.req_ras_chk_criti = xgpu_nv_check_vf_critical_region
};