/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

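/*
 * Byte-wide helpers for the BIF mailbox control register: the TRN byte
 * carries the VF's TRN_MSG_VALID (bit 0) and TRN_MSG_ACK (bit 1) bits, and
 * writing bit 1 of the RCV byte acks the message the host placed in
 * MSGBUF_RCV_DW0..DW3.
 */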
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If it is called outside the IRQ routine, peek_msg is not guaranteed to
 * return the correct value, since RCV_DW0 is only meaningful while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

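/*
 * Read the pending message from RCV_DW0 and compare it against the expected
 * event.  Returns 0 on a match, -EINVAL on IDH_FAIL, -ENODEV on an
 * unrecoverable-error notification (all three cases ack the message), and
 * -ENOENT without acking when some other message is pending.
 */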
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	int r = 0;
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg == IDH_FAIL)
		r = -EINVAL;
	else if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
		r = -ENODEV;
	else if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return r;
}

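/*
 * xgpu_nv_peek_ack()/xgpu_nv_poll_ack() check the VF's TRN_MSG_ACK bit, which
 * the hardware raises once the host has acknowledged the transmitted message.
 */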
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	dev_err(adev->dev, "Didn't get TRN_MSG_ACK from pf in %d ms\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

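/*
 * Poll for a specific message from the host, retrying every 10 ms until
 * NV_MAILBOX_POLL_MSG_TIMEDOUT ms have elapsed.  If the host reports an
 * unrecoverable error instead, mark the RAS context as RMA and bail out.
 */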
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r) {
			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
					event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
			return 0;
		} else if (r == -ENODEV) {
			if (!amdgpu_ras_is_rma(adev)) {
				ras->is_rma = true;
				dev_err(adev->dev, "VF is in an unrecoverable state. "
						"Runtime Services are halted.\n");
			}
			return r;
		}

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	dev_dbg(adev->dev, "nv_poll_msg timed out\n");

	return -ETIME;
}

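/*
 * Transmit a request to the host: write the request and up to three data
 * dwords into MSGBUF_TRN_DW0..DW3, raise TRN_MSG_VALID and poll for the
 * host's ack.
 */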
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first: with it cleared the hardware clears the
	 * host's RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK.
	 * Otherwise xgpu_nv_poll_ack() below would return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			dev_err_ratelimited(adev->dev, "trn=%x ACK should not be asserted, waiting again\n", trn);
			msleep(1);
		}
	} while (trn);

	dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		dev_err(adev->dev, "Didn't get ack from pf, continuing\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

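/*
 * Send a request to the host and, for requests that have a defined reply,
 * poll for the matching completion message (e.g. IDH_REQ_GPU_*_ACCESS ->
 * IDH_READY_TO_ACCESS_GPU).  Up to five attempts are made before giving up;
 * a VF already marked RMA refuses to send anything.
 */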
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
			enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:

	if (amdgpu_ras_is_rma(adev))
		return -ENODEV;

	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	case IDH_RAS_POISON:
		if (data1 != 0)
			event = IDH_RAS_POISON_READY;
		break;
	case IDH_REQ_RAS_ERROR_COUNT:
		event = IDH_RAS_ERROR_COUNT_READY;
		break;
	case IDH_REQ_RAS_CPER_DUMP:
		event = IDH_RAS_CPER_DUMP_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 5)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				dev_err(adev->dev, "Didn't get msg %d from pf, error=%d\n", event, r);
				return r;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	return xgpu_nv_send_access_requests_with_param(adev,
						req, 0, 0, 0);
}

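/*
 * Ask the host to reset the GPU on the VF's behalf; the request is repeated
 * up to NV_MAILBOX_POLL_MSG_REP_MAX times if the host does not respond.
 */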
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "got ack interrupt, nothing to do\n");
	return 0;
}

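/*
 * Enable or disable the mailbox ACK interrupt (bit 1 of MAILBOX_INT_CNTL);
 * bit 0, handled in xgpu_nv_set_mailbox_rcv_irq() below, enables the
 * message-valid interrupt.
 */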
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}

static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
			dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
			return 0;
		}
		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	dev_dbg(adev->dev, "waiting for NV IDH_FLR_NOTIFICATION_CMPL timed out\n");
	return -ETIME;
}

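/**
 * xgpu_nv_mailbox_flr_work - handle a host-initiated function level reset
 * @work: pointer to the work_struct
 *
 * Stops the VF/PF data exchange and, if GPU recovery is enabled and no job
 * timeout (TDR) is expected to trigger it instead, schedules full GPU
 * recovery with the AMDGPU_HOST_FLR flag set.
 */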
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	struct amdgpu_reset_context reset_context = { 0 };

	amdgpu_virt_fini_data_exchange(adev);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

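/**
 * xgpu_nv_mailbox_req_bad_pages_work - Request bad page information from the host
 * @work: pointer to the work_struct
 *
 * This work handler is triggered when the host signals that new bad pages are
 * available; it stops the data exchange and asks the host for the bad page
 * records.
 */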
static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	if (down_read_trylock(&adev->reset_domain->sem)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_request_bad_pages(adev);
		up_read(&adev->reset_domain->sem);
	}
}

/**
 * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
 * @work: pointer to the work_struct
 *
 * This work handler is triggered when bad pages are ready, and it reinitializes
 * the data exchange region to retrieve updated bad page information from the host.
 */
static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	if (down_read_trylock(&adev->reset_domain->sem)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_init_data_exchange(adev);
		up_read(&adev->reset_domain->sem);
	}
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

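/*
 * Handle a "message valid" interrupt from the host: peek at the pending
 * message and dispatch it, deferring the heavy lifting (FLR handling, bad
 * page requests) to work items.
 */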
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	switch (event) {
	case IDH_RAS_BAD_PAGES_READY:
		xgpu_nv_mailbox_send_ack(adev);
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.handle_bad_pages_work);
		break;
	case IDH_RAS_BAD_PAGES_NOTIFICATION:
		xgpu_nv_mailbox_send_ack(adev);
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.req_bad_pages_work);
		break;
	case IDH_UNRECOV_ERR_NOTIFICATION:
		xgpu_nv_mailbox_send_ack(adev);
		if (!amdgpu_ras_is_rma(adev)) {
			ras->is_rma = true;
			dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
		}

		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
						&adev->virt.flr_work),
					"Failed to queue work! at %s",
					__func__);
		break;
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
				   &adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
		/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
		 * handler can ignore it here since the polling thread will
		 * handle it; other messages such as FLR complete are likewise
		 * not handled here.
		 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

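/*
 * Register the mailbox receive (src id 135) and ack (src id 138) interrupt
 * sources with the BIF IH client.
 */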
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
	INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
	INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

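/*
 * Forward a RAS poison consumption event to the host.  UMC IP versions older
 * than 12.0.0 only take the bare IDH_RAS_POISON request; newer ones also get
 * the affected RAS block and require the data exchange to be stopped first.
 */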
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
	} else {
		amdgpu_virt_fini_data_exchange(adev);
		xgpu_nv_send_access_requests_with_param(adev,
					IDH_RAS_POISON, block, 0, 0);
	}
}

static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
{
	enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);

	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}

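/*
 * Request a RAS CPER dump from the host; the VF's 64-bit ring read pointer is
 * split into high/low dwords to fit the mailbox data registers.
 */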
static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
{
	uint32_t vf_rptr_hi, vf_rptr_lo;

	vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
	vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
	return xgpu_nv_send_access_requests_with_param(
		adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}

static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}

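/* Mailbox-based virtualization ops used when amdgpu runs as an SR-IOV VF. */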
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
	.req_init_data  = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.ready_to_reset = xgpu_nv_ready_to_reset,
	.wait_reset = xgpu_nv_wait_reset,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
	.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
	.req_ras_err_count = xgpu_nv_req_ras_err_count,
	.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
	.req_bad_pages = xgpu_nv_req_ras_bad_pages,
};
573