xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c (revision a61c16258a4720065972cf04fcfee1caa6ea5fc0)
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"

#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA resides in the 3rd page of the CSA */
#define AMDGPU_CSA_SDMA_OFFSET (4096 * 2)

/*
 * GPU SDMA IP block helper functions.
 */

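/**
 * amdgpu_sdma_get_instance_from_ring - Look up the SDMA instance that owns a ring
 * @ring: ring to look up
 *
 * Walks the SDMA instances and returns the instance whose gfx or page ring
 * matches @ring, or NULL if the ring does not belong to any SDMA instance.
 */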
struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page)
			return &adev->sdma.instance[i];

	return NULL;
}

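/**
 * amdgpu_sdma_get_index_from_ring - Look up the SDMA instance index for a ring
 * @ring: ring to look up
 * @index: returned instance index
 *
 * Returns 0 and stores the instance index in @index if @ring is the gfx or
 * page ring of an SDMA instance, -EINVAL otherwise.
 */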
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (ring == &adev->sdma.instance[i].ring ||
			ring == &adev->sdma.instance[i].page) {
			*index = i;
			return 0;
		}
	}

	return -EINVAL;
}

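/**
 * amdgpu_sdma_get_csa_mc_addr - Get the MC address of the SDMA CSA for a VMID
 * @ring: SDMA ring the CSA is used with
 * @vmid: VMID the CSA belongs to
 *
 * Returns the MC address of the per-instance context save area used for SDMA
 * preemption on @ring, or 0 when preemption is not used (SRIOV, VMID 0, MCBP
 * disabled) or the instance index is invalid.
 */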
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
				     unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t csa_mc_addr;
	uint32_t index = 0;
	int r;

	/* don't enable OS preemption on SDMA under SRIOV */
	if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
		return 0;

	r = amdgpu_sdma_get_index_from_ring(ring, &index);

	if (r || index > 31)
		csa_mc_addr = 0;
	else
		csa_mc_addr = amdgpu_csa_vaddr(adev) +
			AMDGPU_CSA_SDMA_OFFSET +
			index * AMDGPU_CSA_SDMA_SIZE;

	return csa_mc_addr;
}

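/**
 * amdgpu_sdma_ras_late_init - Common SDMA RAS late init
 * @adev: amdgpu device pointer
 * @ras_block: RAS block to initialize
 *
 * Performs the common RAS late init and, if RAS is supported for the block,
 * enables the ECC interrupt on every SDMA instance.
 *
 * Returns 0 on success, or a negative error code on failure.
 */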
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
			      struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
				AMDGPU_SDMA_IRQ_INSTANCE0 + i);
			if (r)
				goto late_fini;
		}
	}

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

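/*
 * RAS error callback for SDMA: flag the SRAM ECC error for KFD and trigger
 * a GPU reset on bare metal (the host handles the reset under SRIOV).
 */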
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);

	if (amdgpu_sriov_vf(adev))
		return AMDGPU_RAS_SUCCESS;

	amdgpu_ras_reset_gpu(adev);

	return AMDGPU_RAS_SUCCESS;
}

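/*
 * ECC interrupt handler for SDMA: forward the IV entry to the RAS interrupt
 * dispatcher if a RAS interface is registered for the SDMA block.
 */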
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->sdma.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

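/*
 * Parse the firmware header of an SDMA instance and cache the firmware and
 * feature versions; burst NOP is enabled for feature version >= 20.
 */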
static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
	uint16_t version_major;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;
	const struct sdma_firmware_header_v2_0 *hdr_v2;
	const struct sdma_firmware_header_v3_0 *hdr_v3;

	header = (const struct common_firmware_header *)
		sdma_inst->fw->data;
	version_major = le16_to_cpu(header->header_version_major);

	switch (version_major) {
	case 1:
		hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
		break;
	case 2:
		hdr_v2 = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
		break;
	case 3:
		hdr_v3 = (const struct sdma_firmware_header_v3_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr_v3->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr_v3->ucode_feature_version);
		break;
	default:
		return -EINVAL;
	}

	if (sdma_inst->feature_version >= 20)
		sdma_inst->burst_nop = true;

	return 0;
}

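/*
 * Release the firmware of every SDMA instance (only the first one when
 * duplicate indicates all instances share the same image) and clear the
 * instance array.
 */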
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
				  bool duplicate)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		amdgpu_ucode_release(&adev->sdma.instance[i].fw);
		if (duplicate)
			break;
	}

	memset((void *)adev->sdma.instance, 0,
	       sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
}

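/**
 * amdgpu_sdma_init_microcode - Request and set up SDMA firmware
 * @adev: amdgpu device pointer
 * @instance: SDMA instance to load firmware for
 * @duplicate: true if all instances use the same firmware image
 *
 * Requests the SDMA firmware for @instance, parses its header, optionally
 * copies the firmware info to the remaining instances, and registers the
 * firmware entries needed when PSP loads the SDMA microcode.
 *
 * Returns 0 on success, or a negative error code on failure.
 */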
int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
			       u32 instance, bool duplicate)
{
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	int err, i;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;
	const struct sdma_firmware_header_v3_0 *sdma_hv3;
	uint16_t version_major;
	char ucode_prefix[30];

	amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
	if (instance == 0)
		err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s.bin", ucode_prefix);
	else
		err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s%d.bin", ucode_prefix, instance);
	if (err)
		goto out;

	header = (const struct common_firmware_header *)
		adev->sdma.instance[instance].fw->data;
	version_major = le16_to_cpu(header->header_version_major);

	if ((duplicate && instance) || (!duplicate && version_major > 1)) {
		err = -EINVAL;
		goto out;
	}

	err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
	if (err)
		goto out;

	if (duplicate) {
		for (i = 1; i < adev->sdma.num_instances; i++)
			memcpy((void *)&adev->sdma.instance[i],
			       (void *)&adev->sdma.instance[0],
			       sizeof(struct amdgpu_sdma_instance));
	}

	DRM_DEBUG("psp_load == '%s'\n",
		  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		switch (version_major) {
		case 1:
			for (i = 0; i < adev->sdma.num_instances; i++) {
				if (!duplicate && (instance != i))
					continue;
				else {
					/* Use a single copy per SDMA firmware type. PSP uses the same instance for all
					 * groups of SDMAs */
					if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
						IP_VERSION(4, 4, 2) ||
					     amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
						IP_VERSION(4, 4, 4) ||
					     amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
						IP_VERSION(4, 4, 5)) &&
					    adev->firmware.load_type ==
						AMDGPU_FW_LOAD_PSP &&
					    adev->sdma.num_inst_per_aid == i) {
						break;
					}
					info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
					info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
					info->fw = adev->sdma.instance[i].fw;
					adev->firmware.fw_size +=
						ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
				}
			}
			break;
		case 2:
			sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
				adev->sdma.instance[0].fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
			break;
		case 3:
			sdma_hv3 = (const struct sdma_firmware_header_v3_0 *)
				adev->sdma.instance[0].fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_RS64];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_RS64;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hv3->ucode_size_bytes), PAGE_SIZE);
			break;
		default:
			err = -EINVAL;
		}
	}

out:
	if (err)
		amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
	return err;
}

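/*
 * Register the SDMA RAS block and fill in the default late_init and error
 * handling callbacks if the IP specific code did not provide its own.
 */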
int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
{
	int err = 0;
	struct amdgpu_sdma_ras *ras = NULL;

	/* adev->sdma.ras being NULL means SDMA does not support RAS,
	 * so there is nothing to do here.
	 */
	if (!adev->sdma.ras)
		return 0;

	ras = adev->sdma.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register sdma ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "sdma");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->sdma.ras_if = &ras->ras_block.ras_comm;

	/* If no special ras_late_init function is defined, use the default ras_late_init */
	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;

	/* If no special ras_cb function is defined, use the default ras_cb */
	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;

	return 0;
}

/*
 * debugfs to enable/disable sdma job submission to a specific core.
 */
#if defined(CONFIG_DEBUG_FS)
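/*
 * Mask layout: each SDMA instance contributes num_ring consecutive bits,
 * bit (i * num_ring) for the gfx ring of instance i and, when a page queue
 * exists, bit (i * num_ring + 1) for its page ring.
 */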
static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u64 i, num_ring;
	u64 mask = 0;
	struct amdgpu_ring *ring, *page = NULL;

	if (!adev)
		return -ENODEV;

	/* Determine the number of rings per SDMA instance
	 * (1 for sdma gfx ring, 2 if page queue exists)
	 */
	if (adev->sdma.has_page_queue)
		num_ring = 2;
	else
		num_ring = 1;

	/* Calculate the maximum possible mask value
	 * based on the number of SDMA instances and rings
	 */
	mask = BIT_ULL(adev->sdma.num_instances * num_ring) - 1;

	if ((val & mask) == 0)
		return -EINVAL;

	for (i = 0; i < adev->sdma.num_instances; ++i) {
		ring = &adev->sdma.instance[i].ring;
		if (adev->sdma.has_page_queue)
			page = &adev->sdma.instance[i].page;
		if (val & BIT_ULL(i * num_ring))
			ring->sched.ready = true;
		else
			ring->sched.ready = false;

		if (page) {
			if (val & BIT_ULL(i * num_ring + 1))
				page->sched.ready = true;
			else
				page->sched.ready = false;
		}
	}
	/* Publish the sched.ready flag updates so they take effect immediately across SMP */
	smp_rmb();
	return 0;
}

static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u64 i, num_ring;
	u64 mask = 0;
	struct amdgpu_ring *ring, *page = NULL;

	if (!adev)
		return -ENODEV;

	/* Determine the number of rings per SDMA instance
	 * (1 for sdma gfx ring, 2 if page queue exists)
	 */
	if (adev->sdma.has_page_queue)
		num_ring = 2;
	else
		num_ring = 1;

	for (i = 0; i < adev->sdma.num_instances; ++i) {
		ring = &adev->sdma.instance[i].ring;
		if (adev->sdma.has_page_queue)
			page = &adev->sdma.instance[i].page;

		if (ring->sched.ready)
			mask |= BIT_ULL(i * num_ring);
		else
			mask &= ~BIT_ULL(i * num_ring);

		if (page) {
			if (page->sched.ready)
				mask |= BIT_ULL(i * num_ring + 1);
			else
				mask &= ~BIT_ULL(i * num_ring + 1);
		}
	}

	*val = mask;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops,
			 amdgpu_debugfs_sdma_sched_mask_get,
			 amdgpu_debugfs_sdma_sched_mask_set, "%llx\n");

#endif

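/*
 * Create the amdgpu_sdma_sched_mask debugfs file; the file is only created
 * when more than one SDMA instance is present.
 */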
void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	if (adev->sdma.num_instances <= 1)
		return;
	sprintf(name, "amdgpu_sdma_sched_mask");
	debugfs_create_file(name, 0600, root, adev,
			    &amdgpu_debugfs_sdma_sched_mask_fops);
#endif
}

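/*
 * sysfs show callback: report the reset types supported by the SDMA block.
 */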
static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (!adev)
		return -ENODEV;

	return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset);
}

static DEVICE_ATTR(sdma_reset_mask, 0444,
		   amdgpu_get_sdma_reset_mask, NULL);

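/*
 * Create the sdma_reset_mask sysfs attribute when GPU recovery is enabled
 * and at least one SDMA instance exists.
 */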
int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (!amdgpu_gpu_recovery)
		return r;

	if (adev->sdma.num_instances) {
		r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask);
		if (r)
			return r;
	}

	return r;
}

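/*
 * Remove the sdma_reset_mask sysfs attribute if it was created.
 */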
void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_gpu_recovery)
		return;

	if (adev->dev->kobj.sd) {
		if (adev->sdma.num_instances)
			device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
	}
}

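/*
 * Return the page queue ring paired with an SDMA gfx ring, or NULL if the
 * ring has no paired page queue.
 */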
struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (adev->sdma.has_page_queue &&
	    (ring->me < adev->sdma.num_instances) &&
	    (ring == &adev->sdma.instance[ring->me].ring))
		return &adev->sdma.instance[ring->me].page;
	else
		return NULL;
}

/**
 * amdgpu_sdma_is_shared_inv_eng - Check if a ring is an SDMA ring that shares a VM invalidation engine
 * @adev: Pointer to the AMDGPU device structure
 * @ring: Pointer to the ring structure to check
 *
 * This function checks if the given ring is an SDMA ring that shares a VM invalidation engine.
 * It returns true if the ring is such an SDMA ring, false otherwise.
 */
bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	int i = ring->me;

	if (!adev->sdma.has_page_queue || i >= adev->sdma.num_instances)
		return false;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
		return (ring == &adev->sdma.instance[i].page);
	else
		return false;
}

/**
 * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks
 * @adev: Pointer to the AMDGPU device
 * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions
 *
 * This function allows KFD and AMDGPU to register their own callbacks for handling
 * pre-reset and post-reset operations for engine reset. These are needed because engine
 * reset will stop all queues on that engine.
 */
void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs)
{
	if (!funcs)
		return;

	/* Ensure the reset_callback_list is initialized */
	if (!adev->sdma.reset_callback_list.next) {
		INIT_LIST_HEAD(&adev->sdma.reset_callback_list);
	}
	/* Initialize the list node in the callback structure */
	INIT_LIST_HEAD(&funcs->list);

	/* Add the callback structure to the global list */
	list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
}

/**
 * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
 * @adev: Pointer to the AMDGPU device
 * @instance_id: ID of the SDMA engine instance to reset
 *
 * This function performs the following steps:
 * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state.
 * 2. Resets the specified SDMA engine instance.
 * 3. Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */
int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
{
	struct sdma_on_reset_funcs *funcs;
	int ret = 0;
	struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
	struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
	struct amdgpu_ring *page_ring = &sdma_instance->page;
	bool gfx_sched_stopped = false, page_sched_stopped = false;

	mutex_lock(&sdma_instance->engine_reset_mutex);
	/* Stop the scheduler's work queue for the GFX and page rings if they are running.
	 * This ensures that no new tasks are submitted to the queues while
	 * the reset is in progress.
	 */
	if (!amdgpu_ring_sched_ready(gfx_ring)) {
		drm_sched_wqueue_stop(&gfx_ring->sched);
		gfx_sched_stopped = true;
	}

	if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
		drm_sched_wqueue_stop(&page_ring->sched);
		page_sched_stopped = true;
	}

	/* Invoke all registered pre_reset callbacks */
	list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
		if (funcs->pre_reset) {
			ret = funcs->pre_reset(adev, instance_id);
			if (ret) {
				dev_err(adev->dev,
					"pre_reset callback failed for instance %u: %d\n",
					instance_id, ret);
				goto exit;
			}
		}
	}

	/* Perform the SDMA reset for the specified instance */
	ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
	if (ret) {
		dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
		goto exit;
	}

	/* Invoke all registered post_reset callbacks */
	list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
		if (funcs->post_reset) {
			ret = funcs->post_reset(adev, instance_id);
			if (ret) {
				dev_err(adev->dev,
					"post_reset callback failed for instance %u: %d\n",
					instance_id, ret);
				goto exit;
			}
		}
	}

exit:
	/* Restart the scheduler's work queue for the GFX and page rings
	 * if they were stopped by this function. This allows new tasks
	 * to be submitted to the queues after the reset is complete.
	 */
	if (!ret) {
		if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
			drm_sched_wqueue_start(&gfx_ring->sched);
		}
		if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
			drm_sched_wqueue_start(&page_ring->sched);
		}
	}
	mutex_unlock(&sdma_instance->engine_reset_mutex);

	return ret;
}
643