xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c (revision b09cdeb4d38872b84c6d59878915eae2adbe9d2b)
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"

#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA resides in the 3rd page of CSA */
#define AMDGPU_CSA_SDMA_OFFSET (4096 * 2)

/*
 * GPU SDMA IP block helper functions.
 */

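/**
 * amdgpu_sdma_get_instance_from_ring - look up the SDMA instance owning a ring
 * @ring: ring to look up
 *
 * Returns the SDMA instance whose gfx or page ring matches @ring, or NULL if
 * the ring does not belong to any SDMA instance.
 */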
struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page)
			return &adev->sdma.instance[i];

	return NULL;
}

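/**
 * amdgpu_sdma_get_index_from_ring - find the SDMA instance index for a ring
 * @ring: ring to look up
 * @index: output parameter that receives the instance index
 *
 * Returns 0 and stores the instance index in @index if the ring is an SDMA
 * gfx or page ring, or -EINVAL if the ring is not owned by an SDMA instance.
 */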
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page) {
			*index = i;
			return 0;
		}
	}

	return -EINVAL;
}

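/**
 * amdgpu_sdma_get_csa_mc_addr - get the CSA GPU address for an SDMA ring
 * @ring: SDMA ring to query
 * @vmid: VMID the command submission will run under
 *
 * Returns the MC address of the per-instance SDMA context save area used for
 * mid command buffer preemption, or 0 if preemption is not used (e.g. under
 * SRIOV, for VMID 0, or when MCBP is disabled).
 */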
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
				     unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t csa_mc_addr;
	uint32_t index = 0;
	int r;

	/* don't enable OS preemption on SDMA under SRIOV */
	if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
		return 0;

	if (ring->is_mes_queue) {
		uint32_t offset = 0;

		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  sdma[ring->idx].sdma_meta_data);
		csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	} else {
		r = amdgpu_sdma_get_index_from_ring(ring, &index);

		if (r || index > 31)
			csa_mc_addr = 0;
		else
			csa_mc_addr = amdgpu_csa_vaddr(adev) +
				AMDGPU_CSA_SDMA_OFFSET +
				index * AMDGPU_CSA_SDMA_SIZE;
	}

	return csa_mc_addr;
}

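/**
 * amdgpu_sdma_ras_late_init - late RAS initialization for the SDMA block
 * @adev: amdgpu device pointer
 * @ras_block: RAS block descriptor for SDMA
 *
 * Performs the common RAS late init and, if RAS is supported on this block,
 * enables the ECC interrupt for every SDMA instance.
 *
 * Returns 0 on success, negative error code on failure.
 */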
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
			      struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
				AMDGPU_SDMA_IRQ_INSTANCE0 + i);
			if (r)
				goto late_fini;
		}
	}

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

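/**
 * amdgpu_sdma_process_ras_data_cb - RAS error handling callback for SDMA
 * @adev: amdgpu device pointer
 * @err_data: RAS error data (unused for SDMA)
 * @entry: interrupt vector entry that reported the error
 *
 * Flags the SRAM ECC error to KFD and, on bare metal, schedules a GPU reset
 * to recover from the uncorrectable SDMA error.
 */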
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);

	if (amdgpu_sriov_vf(adev))
		return AMDGPU_RAS_SUCCESS;

	amdgpu_ras_reset_gpu(adev);

	return AMDGPU_RAS_SUCCESS;
}

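/**
 * amdgpu_sdma_process_ecc_irq - forward an SDMA ECC interrupt to the RAS core
 * @adev: amdgpu device pointer
 * @source: interrupt source descriptor
 * @entry: interrupt vector entry
 *
 * Dispatches the ECC interrupt to the registered SDMA RAS interrupt handler,
 * if a RAS interface has been set up for the SDMA block.
 */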
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->sdma.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

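/*
 * amdgpu_sdma_init_inst_ctx - parse an SDMA firmware image into an instance
 *
 * Reads the firmware header (v1, v2 or v3), fills in the instance's firmware
 * and feature versions, and enables burst NOP when the feature version
 * supports it. Returns -EINVAL for an unknown header major version.
 */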
static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
	uint16_t version_major;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;
	const struct sdma_firmware_header_v2_0 *hdr_v2;
	const struct sdma_firmware_header_v3_0 *hdr_v3;

	header = (const struct common_firmware_header *)
		sdma_inst->fw->data;
	version_major = le16_to_cpu(header->header_version_major);

	switch (version_major) {
	case 1:
		hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
		break;
	case 2:
		hdr_v2 = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
		break;
	case 3:
		hdr_v3 = (const struct sdma_firmware_header_v3_0 *)sdma_inst->fw->data;
		sdma_inst->fw_version = le32_to_cpu(hdr_v3->header.ucode_version);
		sdma_inst->feature_version = le32_to_cpu(hdr_v3->ucode_feature_version);
		break;
	default:
		return -EINVAL;
	}

	if (sdma_inst->feature_version >= 20)
		sdma_inst->burst_nop = true;

	return 0;
}

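/**
 * amdgpu_sdma_destroy_inst_ctx - release SDMA firmware and clear instance state
 * @adev: amdgpu device pointer
 * @duplicate: true if all instances share the firmware of instance 0
 *
 * Releases the firmware reference of every instance (or only instance 0 when
 * @duplicate is set) and zeroes the whole SDMA instance array.
 */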
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
				  bool duplicate)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		amdgpu_ucode_release(&adev->sdma.instance[i].fw);
		if (duplicate)
			break;
	}

	memset((void *)adev->sdma.instance, 0,
	       sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
}

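/**
 * amdgpu_sdma_init_microcode - load and register SDMA firmware
 * @adev: amdgpu device pointer
 * @instance: SDMA instance to load firmware for
 * @duplicate: true if all instances use the firmware of instance 0
 *
 * Requests the SDMA firmware image for @instance, parses its header, and, for
 * PSP front-door loading, registers the required ucode entries (per instance
 * for v1 headers, control/context threads for v2, RS64 for v3).
 *
 * Returns 0 on success, negative error code on failure; on failure the
 * instance firmware context is torn down again.
 */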
int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
			       u32 instance, bool duplicate)
{
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	int err, i;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;
	const struct sdma_firmware_header_v3_0 *sdma_hv3;
	uint16_t version_major;
	char ucode_prefix[30];

	amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
	if (instance == 0)
		err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s.bin", ucode_prefix);
	else
		err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s%d.bin", ucode_prefix, instance);
	if (err)
		goto out;

	header = (const struct common_firmware_header *)
		adev->sdma.instance[instance].fw->data;
	version_major = le16_to_cpu(header->header_version_major);

	if ((duplicate && instance) || (!duplicate && version_major > 1)) {
		err = -EINVAL;
		goto out;
	}

	err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
	if (err)
		goto out;

	if (duplicate) {
		for (i = 1; i < adev->sdma.num_instances; i++)
			memcpy((void *)&adev->sdma.instance[i],
			       (void *)&adev->sdma.instance[0],
			       sizeof(struct amdgpu_sdma_instance));
	}

	DRM_DEBUG("psp_load == '%s'\n",
		  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		switch (version_major) {
		case 1:
			for (i = 0; i < adev->sdma.num_instances; i++) {
				if (!duplicate && (instance != i))
					continue;
				else {
					/* Use a single copy per SDMA firmware type. PSP uses the same instance for all
					 * groups of SDMAs */
					if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
						IP_VERSION(4, 4, 2) ||
					     amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
						IP_VERSION(4, 4, 4) ||
					     amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
						IP_VERSION(4, 4, 5)) &&
					    adev->firmware.load_type ==
						AMDGPU_FW_LOAD_PSP &&
					    adev->sdma.num_inst_per_aid == i) {
						break;
					}
					info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
					info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
					info->fw = adev->sdma.instance[i].fw;
					adev->firmware.fw_size +=
						ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
				}
			}
			break;
		case 2:
			sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
				adev->sdma.instance[0].fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
			break;
		case 3:
			sdma_hv3 = (const struct sdma_firmware_header_v3_0 *)
				adev->sdma.instance[0].fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_RS64];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA_RS64;
			info->fw = adev->sdma.instance[0].fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(sdma_hv3->ucode_size_bytes), PAGE_SIZE);
			break;
		default:
			err = -EINVAL;
		}
	}

out:
	if (err)
		amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
	return err;
}

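/**
 * amdgpu_sdma_ras_sw_init - register the SDMA RAS block with the RAS core
 * @adev: amdgpu device pointer
 *
 * Registers the IP specific SDMA RAS block (if one was provided), fills in
 * the common RAS block fields, and installs the default late init and error
 * handling callbacks when the IP code did not supply its own.
 *
 * Returns 0 on success, negative error code on failure.
 */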
int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
{
	int err = 0;
	struct amdgpu_sdma_ras *ras = NULL;

	/* If adev->sdma.ras is NULL, the SDMA IP does not support RAS,
	 * so there is nothing to do here.
	 */
	if (!adev->sdma.ras)
		return 0;

	ras = adev->sdma.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register sdma ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "sdma");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->sdma.ras_if = &ras->ras_block.ras_comm;

	/* If no special ras_late_init function is defined, use the default ras_late_init */
	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;

	/* If no special ras_cb function is defined, use the default ras_cb */
	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;

	return 0;
}

/*
 * debugfs to enable/disable sdma job submission to a specific core.
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u64 i, num_ring;
	u64 mask = 0;
	struct amdgpu_ring *ring, *page = NULL;

	if (!adev)
		return -ENODEV;

	/* Determine the number of rings per SDMA instance
	 * (1 for sdma gfx ring, 2 if page queue exists)
	 */
	if (adev->sdma.has_page_queue)
		num_ring = 2;
	else
		num_ring = 1;

	/* Calculate the maximum possible mask value
	 * based on the number of SDMA instances and rings
	 */
	mask = BIT_ULL(adev->sdma.num_instances * num_ring) - 1;

	if ((val & mask) == 0)
		return -EINVAL;

	for (i = 0; i < adev->sdma.num_instances; ++i) {
		ring = &adev->sdma.instance[i].ring;
		if (adev->sdma.has_page_queue)
			page = &adev->sdma.instance[i].page;
		if (val & BIT_ULL(i * num_ring))
			ring->sched.ready = true;
		else
			ring->sched.ready = false;

		if (page) {
			if (val & BIT_ULL(i * num_ring + 1))
				page->sched.ready = true;
			else
				page->sched.ready = false;
		}
	}
	/* make the updated sched.ready flags visible to other CPUs immediately */
	smp_rmb();
	return 0;
}

static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u64 i, num_ring;
	u64 mask = 0;
	struct amdgpu_ring *ring, *page = NULL;

	if (!adev)
		return -ENODEV;

	/* Determine the number of rings per SDMA instance
	 * (1 for sdma gfx ring, 2 if page queue exists)
	 */
	if (adev->sdma.has_page_queue)
		num_ring = 2;
	else
		num_ring = 1;

	for (i = 0; i < adev->sdma.num_instances; ++i) {
		ring = &adev->sdma.instance[i].ring;
		if (adev->sdma.has_page_queue)
			page = &adev->sdma.instance[i].page;

		if (ring->sched.ready)
			mask |= BIT_ULL(i * num_ring);
		else
			mask &= ~BIT_ULL(i * num_ring);

		if (page) {
			if (page->sched.ready)
				mask |= BIT_ULL(i * num_ring + 1);
			else
				mask &= ~BIT_ULL(i * num_ring + 1);
		}
	}

	*val = mask;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops,
			 amdgpu_debugfs_sdma_sched_mask_get,
			 amdgpu_debugfs_sdma_sched_mask_set, "%llx\n");

#endif

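/**
 * amdgpu_debugfs_sdma_sched_mask_init - expose the SDMA scheduler mask in debugfs
 * @adev: amdgpu device pointer
 *
 * Creates the amdgpu_sdma_sched_mask debugfs file when the device has more
 * than one SDMA instance; the file reads and writes the per-ring scheduler
 * ready mask defined above.
 */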
void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	if (adev->sdma.num_instances <= 1)
		return;
	sprintf(name, "amdgpu_sdma_sched_mask");
	debugfs_create_file(name, 0600, root, adev,
			    &amdgpu_debugfs_sdma_sched_mask_fops);
#endif
}

static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (!adev)
		return -ENODEV;

	return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset);
}

static DEVICE_ATTR(sdma_reset_mask, 0444,
		   amdgpu_get_sdma_reset_mask, NULL);

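/**
 * amdgpu_sdma_sysfs_reset_mask_init - create the sdma_reset_mask sysfs file
 * @adev: amdgpu device pointer
 *
 * Creates the read-only sdma_reset_mask attribute when GPU recovery is
 * enabled and the device has at least one SDMA instance.
 *
 * Returns 0 on success, negative error code on failure.
 */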
int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (!amdgpu_gpu_recovery)
		return r;

	if (adev->sdma.num_instances) {
		r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask);
		if (r)
			return r;
	}

	return r;
}

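/**
 * amdgpu_sdma_sysfs_reset_mask_fini - remove the sdma_reset_mask sysfs file
 * @adev: amdgpu device pointer
 *
 * Removes the attribute created by amdgpu_sdma_sysfs_reset_mask_init(), if
 * the device sysfs directory is still present.
 */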
void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_gpu_recovery)
		return;

	if (adev->dev->kobj.sd) {
		if (adev->sdma.num_instances)
			device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
	}
}

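/**
 * amdgpu_sdma_get_shared_ring - get the page ring paired with an SDMA gfx ring
 * @adev: amdgpu device pointer
 * @ring: SDMA gfx ring
 *
 * Returns the page ring of the same SDMA instance when a page queue exists
 * and @ring is that instance's gfx ring, otherwise NULL.
 */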
struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (adev->sdma.has_page_queue &&
	    (ring->me < adev->sdma.num_instances) &&
	    (ring == &adev->sdma.instance[ring->me].ring))
		return &adev->sdma.instance[ring->me].page;
	else
		return NULL;
}

/**
 * amdgpu_sdma_is_shared_inv_eng - Check if a ring is an SDMA ring that shares a VM invalidation engine
 * @adev: Pointer to the AMDGPU device structure
 * @ring: Pointer to the ring structure to check
 *
 * This function checks if the given ring is an SDMA ring that shares a VM invalidation engine.
 * It returns true if the ring is such an SDMA ring, false otherwise.
 */
bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	int i = ring->me;

	if (!adev->sdma.has_page_queue || i >= adev->sdma.num_instances)
		return false;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
		return (ring == &adev->sdma.instance[i].page);
	else
		return false;
}

/**
 * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks
 * @adev: Pointer to the AMDGPU device structure
 * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions
 *
 * This function allows KFD and AMDGPU to register their own callbacks for handling
 * pre-reset and post-reset operations for engine reset. These are needed because engine
 * reset will stop all queues on that engine.
 */
void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs)
{
	if (!funcs)
		return;

	/* Ensure the reset_callback_list is initialized */
	if (!adev->sdma.reset_callback_list.next)
		INIT_LIST_HEAD(&adev->sdma.reset_callback_list);

	/* Initialize the list node in the callback structure */
	INIT_LIST_HEAD(&funcs->list);

	/* Add the callback structure to the global list */
	list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
}

/**
 * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
 * @adev: Pointer to the AMDGPU device
 * @instance_id: ID of the SDMA engine instance to reset
 *
 * This function performs the following steps:
 * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state.
 * 2. Resets the specified SDMA engine instance.
 * 3. Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */
int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
{
	struct sdma_on_reset_funcs *funcs;
	int ret = 0;
	struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
	struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
	struct amdgpu_ring *page_ring = &sdma_instance->page;
	bool gfx_sched_stopped = false, page_sched_stopped = false;

	mutex_lock(&sdma_instance->engine_reset_mutex);
	/* Stop the scheduler's work queue for the GFX and page rings if they are running.
	 * This ensures that no new tasks are submitted to the queues while
	 * the reset is in progress.
	 */
	if (!amdgpu_ring_sched_ready(gfx_ring)) {
		drm_sched_wqueue_stop(&gfx_ring->sched);
		gfx_sched_stopped = true;
	}

	if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
		drm_sched_wqueue_stop(&page_ring->sched);
		page_sched_stopped = true;
	}

	/* Invoke all registered pre_reset callbacks */
	list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
		if (funcs->pre_reset) {
			ret = funcs->pre_reset(adev, instance_id);
			if (ret) {
				dev_err(adev->dev,
					"pre_reset callback failed for instance %u: %d\n",
					instance_id, ret);
				goto exit;
			}
		}
	}

	/* Perform the SDMA reset for the specified instance */
	ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
	if (ret) {
		dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
		goto exit;
	}

	/* Invoke all registered post_reset callbacks */
	list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
		if (funcs->post_reset) {
			ret = funcs->post_reset(adev, instance_id);
			if (ret) {
				dev_err(adev->dev,
					"post_reset callback failed for instance %u: %d\n",
					instance_id, ret);
				goto exit;
			}
		}
	}

exit:
	/* Restart the scheduler's work queue for the GFX and page rings
	 * if they were stopped by this function. This allows new tasks
	 * to be submitted to the queues after the reset is complete.
	 */
	if (!ret) {
		if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
			drm_sched_wqueue_start(&gfx_ring->sched);
		}
		if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
			drm_sched_wqueue_start(&page_ring->sched);
		}
	}
	mutex_unlock(&sdma_instance->engine_reset_mutex);

	return ret;
}