// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "amdgpu.h"
#include "amdgpu_xcp.h"

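/*
 * Aligned size of one MQD (memory queue descriptor), used below when sizing
 * the GTT region that backs the queue MQDs.
 */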
#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the kfd driver will stop any further GPU execution and
 * create_process (open) will return -EAGAIN.
 */
static int kfd_locked;

#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v12_kfd2kgd;

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_node *kfd);

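/*
 * Set the number of SDMA queues per engine (and reserved queues, where
 * applicable) based on the SDMA IP version.
 */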
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
	uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0);

	switch (sdma_version) {
	case IP_VERSION(4, 0, 0):/* VEGA10 */
	case IP_VERSION(4, 0, 1):/* VEGA12 */
	case IP_VERSION(4, 1, 0):/* RAVEN */
	case IP_VERSION(4, 1, 1):/* RAVEN */
	case IP_VERSION(4, 1, 2):/* RENOIR */
	case IP_VERSION(5, 2, 1):/* VANGOGH */
	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
		kfd->device_info.num_sdma_queues_per_engine = 2;
		break;
	case IP_VERSION(4, 2, 0):/* VEGA20 */
	case IP_VERSION(4, 2, 2):/* ARCTURUS */
	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
	case IP_VERSION(4, 4, 4):
	case IP_VERSION(5, 0, 0):/* NAVI10 */
	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
	case IP_VERSION(5, 0, 2):/* NAVI14 */
	case IP_VERSION(5, 0, 5):/* NAVI12 */
	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
		kfd->device_info.num_sdma_queues_per_engine = 8;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
	case IP_VERSION(6, 1, 2):
	case IP_VERSION(6, 1, 3):
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		kfd->device_info.num_sdma_queues_per_engine = 8;
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		break;
	default:
		dev_warn(kfd_device,
			 "Defaulting to 8 SDMA queues per engine for unknown SDMA IP version (SDMA_HWIP:0x%x)\n",
			 sdma_version);
		kfd->device_info.num_sdma_queues_per_engine = 8;
	}
}

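/* Pick the event interrupt handler class that matches the GC IP version. */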
static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);

	switch (gc_version) {
	case IP_VERSION(9, 0, 1): /* VEGA10 */
	case IP_VERSION(9, 1, 0): /* RAVEN */
	case IP_VERSION(9, 2, 1): /* VEGA12 */
	case IP_VERSION(9, 2, 2): /* RAVEN */
	case IP_VERSION(9, 3, 0): /* RENOIR */
	case IP_VERSION(9, 4, 0): /* VEGA20 */
	case IP_VERSION(9, 4, 1): /* ARCTURUS */
	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
		break;
	case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
	case IP_VERSION(9, 4, 4): /* GC 9.4.4 */
	case IP_VERSION(9, 5, 0): /* GC 9.5.0 */
		kfd->device_info.event_interrupt_class =
						&event_interrupt_class_v9_4_3;
		break;
	case IP_VERSION(10, 3, 1): /* VANGOGH */
	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 1, 10): /* NAVI10 */
	case IP_VERSION(10, 1, 2): /* NAVI12 */
	case IP_VERSION(10, 1, 1): /* NAVI14 */
	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v10;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		/* GFX12_TODO: Change to v12 version. */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
		break;
	default:
		dev_warn(kfd_device,
			 "Defaulting to the v9 event interrupt handler for unknown GC IP version (GC_HWIP:0x%x)\n",
			 gc_version);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
	}
}

static void kfd_device_info_init(struct kfd_dev *kfd,
				 bool vf, uint32_t gfx_target_version)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);
	uint32_t asic_type = kfd->adev->asic_type;

	kfd->device_info.max_pasid_bits = 16;
	kfd->device_info.max_no_of_hqd = 24;
	kfd->device_info.num_of_watch_points = 4;
	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
	kfd->device_info.gfx_target_version = gfx_target_version;

	if (KFD_IS_SOC15(kfd)) {
		kfd->device_info.doorbell_size = 8;
		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
		kfd->device_info.supports_cwsr = true;

		kfd_device_info_set_sdma_info(kfd);

		kfd_device_info_set_event_interrupt_class(kfd);

		if (gc_version < IP_VERSION(11, 0, 0)) {
			/* Navi2x+, Navi1x+ */
			if (gc_version == IP_VERSION(10, 3, 6))
				kfd->device_info.no_atomic_fw_version = 14;
			else if (gc_version == IP_VERSION(10, 3, 7))
				kfd->device_info.no_atomic_fw_version = 3;
			else if (gc_version >= IP_VERSION(10, 3, 0))
				kfd->device_info.no_atomic_fw_version = 92;
			else if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.no_atomic_fw_version = 145;

			/* Navi1x+ */
			if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.needs_pci_atomics = true;
		} else if (gc_version < IP_VERSION(12, 0, 0)) {
			/*
			 * PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
			 * MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
			 * PCIe atomics support.
			 */
			kfd->device_info.needs_pci_atomics = true;
			kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
		} else if (gc_version < IP_VERSION(13, 0, 0)) {
			kfd->device_info.needs_pci_atomics = true;
			kfd->device_info.no_atomic_fw_version = 2090;
		} else {
			kfd->device_info.needs_pci_atomics = true;
		}
	} else {
		kfd->device_info.doorbell_size = 4;
		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
		kfd->device_info.num_sdma_queues_per_engine = 2;

		if (asic_type != CHIP_KAVERI &&
		    asic_type != CHIP_HAWAII &&
		    asic_type != CHIP_TONGA)
			kfd->device_info.supports_cwsr = true;

		if (asic_type != CHIP_HAWAII && !vf)
			kfd->device_info.needs_pci_atomics = true;
	}
}

struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	struct kfd_dev *kfd = NULL;
	const struct kfd2kgd_calls *f2g = NULL;
	uint32_t gfx_target_version = 0;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		gfx_target_version = 70000;
		if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_CARRIZO:
		gfx_target_version = 80001;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_HAWAII:
		gfx_target_version = 70001;
		if (!amdgpu_exp_hw_support)
			pr_info("KFD support on Hawaii is experimental. See modparam exp_hw_support\n");
		else if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_TONGA:
		gfx_target_version = 80002;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_FIJI:
	case CHIP_POLARIS10:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	default:
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		/* Vega 10 */
		case IP_VERSION(9, 0, 1):
			gfx_target_version = 90000;
			f2g = &gfx_v9_kfd2kgd;
			break;
		/* Raven */
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 2):
			gfx_target_version = 90002;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Vega12 */
		case IP_VERSION(9, 2, 1):
			gfx_target_version = 90004;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Renoir */
		case IP_VERSION(9, 3, 0):
			gfx_target_version = 90012;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Vega20 */
		case IP_VERSION(9, 4, 0):
			gfx_target_version = 90006;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Arcturus */
		case IP_VERSION(9, 4, 1):
			gfx_target_version = 90008;
			f2g = &arcturus_kfd2kgd;
			break;
		/* Aldebaran */
		case IP_VERSION(9, 4, 2):
			gfx_target_version = 90010;
			f2g = &aldebaran_kfd2kgd;
			break;
		case IP_VERSION(9, 4, 3):
		case IP_VERSION(9, 4, 4):
			gfx_target_version = 90402;
			f2g = &gc_9_4_3_kfd2kgd;
			break;
		case IP_VERSION(9, 5, 0):
			gfx_target_version = 90500;
			f2g = &gc_9_4_3_kfd2kgd;
			break;
		/* Navi10 */
		case IP_VERSION(10, 1, 10):
			gfx_target_version = 100100;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi12 */
		case IP_VERSION(10, 1, 2):
			gfx_target_version = 100101;
			f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi14 */
		case IP_VERSION(10, 1, 1):
			gfx_target_version = 100102;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Cyan Skillfish */
		case IP_VERSION(10, 1, 3):
		case IP_VERSION(10, 1, 4):
			gfx_target_version = 100103;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Sienna Cichlid */
		case IP_VERSION(10, 3, 0):
			gfx_target_version = 100300;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Navy Flounder */
		case IP_VERSION(10, 3, 2):
			gfx_target_version = 100301;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Van Gogh */
		case IP_VERSION(10, 3, 1):
			gfx_target_version = 100303;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Dimgrey Cavefish */
		case IP_VERSION(10, 3, 4):
			gfx_target_version = 100302;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Beige Goby */
		case IP_VERSION(10, 3, 5):
			gfx_target_version = 100304;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Yellow Carp */
		case IP_VERSION(10, 3, 3):
			gfx_target_version = 100305;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 6):
		case IP_VERSION(10, 3, 7):
			gfx_target_version = 100306;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 0):
			gfx_target_version = 110000;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 4):
			gfx_target_version = 110003;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 2):
			gfx_target_version = 110002;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 3):
			/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
			gfx_target_version = 110001;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 5, 0):
			gfx_target_version = 110500;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 5, 1):
			gfx_target_version = 110501;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 5, 2):
			gfx_target_version = 110502;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 5, 3):
			gfx_target_version = 110503;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(12, 0, 0):
			gfx_target_version = 120000;
			f2g = &gfx_v12_kfd2kgd;
			break;
		case IP_VERSION(12, 0, 1):
			gfx_target_version = 120001;
			f2g = &gfx_v12_kfd2kgd;
			break;
		default:
			break;
		}
		break;
	}

	if (!f2g) {
		if (amdgpu_ip_version(adev, GC_HWIP, 0))
			dev_info(kfd_device,
				"GC IP %06x %s not supported in kfd\n",
				amdgpu_ip_version(adev, GC_HWIP, 0),
				vf ? "VF" : "");
		else
			dev_info(kfd_device, "%s %s not supported in kfd\n",
				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->adev = adev;
	kfd_device_info_init(kfd, vf, gfx_target_version);
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);

	ida_init(&kfd->doorbell_ida);
	atomic_set(&kfd->kfd_processes_count, 0);

	return kfd;
}

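/*
 * Select the CWSR (compute wave save/restore) trap handler that matches the
 * GC IP version. Most handler binaries must fit below KFD_CWSR_TMA_OFFSET;
 * the gfx9.5.0 and gfx11 handlers must fit within a single page.
 */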
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info.supports_cwsr) {
		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex)
					     > KFD_CWSR_TMA_OFFSET);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex)
					     > KFD_CWSR_TMA_OFFSET);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex)
					     > KFD_CWSR_TMA_OFFSET);
			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
			   KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex)
					     > KFD_CWSR_TMA_OFFSET);
			kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 5, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_5_0_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_5_0_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_5_0_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex)
					     > KFD_CWSR_TMA_OFFSET);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex)
					     > KFD_CWSR_TMA_OFFSET);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex)
					     > KFD_CWSR_TMA_OFFSET);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(12, 0, 0)) {
			/*
			 * The gfx11 cwsr trap handler must fit inside a
			 * single page.
			 */
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex)
					     > KFD_CWSR_TMA_OFFSET);
			kfd->cwsr_isa = cwsr_trap_gfx12_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

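/*
 * Allocate GWS (global wave sync) resources for the node. This is done only
 * under HWS scheduling, and only when the hws_gws_support modparam is set or
 * the firmware in use (MEC2, or MES on GFX11) is known to support GWS.
 */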
static int kfd_gws_init(struct kfd_node *node)
{
	int ret = 0;
	struct kfd_dev *kfd = node->kfd;
	uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;

	if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support || (KFD_IS_SOC15(node) &&
		((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
			&& kfd->mec2_fw_version >= 0x81b3) ||
		(KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
			&& kfd->mec2_fw_version >= 0x1b3)  ||
		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
			&& kfd->mec2_fw_version >= 0x30)   ||
		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
			&& kfd->mec2_fw_version >= 0x28) ||
		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3) ||
		 KFD_GC_VERSION(node) == IP_VERSION(9, 4, 4)) ||
		(KFD_GC_VERSION(node) == IP_VERSION(9, 5, 0)) ||
		(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
			&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
			&& kfd->mec2_fw_version >= 0x6b) ||
		(KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
			&& KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
			&& mes_rev >= 68) ||
		(KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))))) {
		if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
			node->adev->gds.gws_size = 64;
		ret = amdgpu_amdkfd_alloc_gws(node->adev,
				node->adev->gds.gws_size, &node->gws);
	}

	return ret;
}

static void kfd_smi_init(struct kfd_node *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}

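/*
 * Bring up a single KFD node: interrupts, device queue manager, GWS,
 * queue manager start (resume) and topology registration.
 */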
static int kfd_init_node(struct kfd_node *node)
{
	int err = -1;

	if (kfd_interrupt_init(node)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	node->dqm = device_queue_manager_init(node);
	if (!node->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	if (kfd_gws_init(node)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			node->adev->gds.gws_size);
		goto gws_error;
	}

	if (kfd_resume(node))
		goto kfd_resume_error;

	if (kfd_topology_add_device(node)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(node);

	return 0;

kfd_topology_add_device_error:
kfd_resume_error:
gws_error:
	device_queue_manager_uninit(node->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(node);
kfd_interrupt_error:
	if (node->gws)
		amdgpu_amdkfd_free_gws(node->adev, node->gws);

	/* Cleanup the node memory here */
	kfree(node);
	return err;
}

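/*
 * Tear down all KFD nodes. The interrupt workqueue is flushed and destroyed
 * first so that no work item touches a node while it is being freed.
 */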
static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
{
	struct kfd_node *knode;
	unsigned int i;

	/*
	 * flush_workqueue ensures that there are no outstanding
	 * work-queue items that will access interrupt_ring. New work items
	 * can't be created because we stopped interrupt handling above.
	 */
	flush_workqueue(kfd->ih_wq);
	destroy_workqueue(kfd->ih_wq);

	for (i = 0; i < num_nodes; i++) {
		knode = kfd->nodes[i];
		device_queue_manager_uninit(knode->dqm);
		kfd_interrupt_exit(knode);
		kfd_topology_remove_device(knode);
		if (knode->gws)
			amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
		kfree(knode);
		kfd->nodes[i] = NULL;
	}
}

static void kfd_setup_interrupt_bitmap(struct kfd_node *node,
				       unsigned int kfd_node_idx)
{
	struct amdgpu_device *adev = node->adev;
	uint32_t xcc_mask = node->xcc_mask;
	uint32_t xcc, mapped_xcc;
	/*
	 * Interrupt bitmap is setup for processing interrupts from
	 * different XCDs and AIDs.
	 * Interrupt bitmap is defined as follows:
	 * 1. Bits 0-15 - correspond to the NodeId field.
	 *    Each bit corresponds to NodeId number. For example, if
	 *    a KFD node has interrupt bitmap set to 0x7, then this
	 *    KFD node will process interrupts with NodeId = 0, 1 and 2
	 *    in the IH cookie.
	 * 2. Bits 16-31 - unused.
	 *
	 * Please note that the kfd_node_idx argument passed to this
	 * function is not related to NodeId field received in the
	 * IH cookie.
	 *
	 * In CPX mode, a KFD node will process an interrupt if:
	 * - the Node Id matches the corresponding bit set in
	 *   Bits 0-15.
	 * - AND VMID reported in the interrupt lies within the
	 *   VMID range of the node.
	 */
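	/*
	 * Illustrative example: with two XCCs that map to instances 0 and 1,
	 * the loop below ORs in (3 << 0) and (5 << 0), giving an
	 * interrupt_bitmap of 0x7, i.e. NodeId 0, 1 and 2 as in the example
	 * above.
	 */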
	for_each_inst(xcc, xcc_mask) {
		mapped_xcc = GET_INST(GC, xcc);
		node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
	}
	dev_info(kfd_device, "Node: %d, interrupt_bitmap: %x\n", kfd_node_idx,
							node->interrupt_bitmap);
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size, map_process_packet_size, i;
	struct kfd_node *node;
	uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
	unsigned int max_proc_per_quantum;
	int partition_mode;
	int xcp_idx;

	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);

	if (kfd->num_nodes == 0) {
		dev_err(kfd_device,
			"KFD num nodes cannot be 0, num_xcc_in_node: %d\n",
			kfd->adev->gfx.num_xcc_per_xcp);
		goto out;
	}

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
	if (!kfd->pci_atomic_requested &&
	    kfd->device_info.needs_pci_atomics &&
	    (!kfd->device_info.no_atomic_fw_version ||
	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
			 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
			 kfd->mec_fw_version,
			 kfd->device_info.no_atomic_fw_version);
		return false;
	}

	first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap) - 1;
	last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap) - 1;
	vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;

	/* For multi-partition capable GPUs, we need special handling for VMIDs
	 * depending on partition mode.
	 * In CPX mode, the VMID range needs to be shared between XCDs.
	 * Additionally, there are 13 VMIDs (3-15) available for KFD. To
	 * divide them equally, we change starting VMID to 4 and not use
	 * VMID 3.
	 * If the VMID range changes for multi-partition capable GPUs, then
	 * this code MUST be revisited.
	 */
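	/*
	 * Illustrative example: with compute_vmid_bitmap covering VMIDs 3-15,
	 * vmid_num_kfd = 13 is halved to 6 and first_vmid_kfd becomes
	 * 16 - 12 = 4, yielding the per-XCD ranges 4-9 and 10-15 assigned
	 * further below.
	 */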
	if (kfd->adev->xcp_mgr) {
		partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
								 AMDGPU_XCP_FL_LOCKED);
		if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
		    kfd->num_nodes != 1) {
			vmid_num_kfd /= 2;
			first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd * 2;
		}
	}

	/* Verify module parameters regarding mapped process number */
	if (hws_max_conc_proc >= 0)
		max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
	else
		max_proc_per_quantum = vmid_num_kfd;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info.mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
				sizeof(struct pm4_mes_map_process_aldebaran) :
				sizeof(struct pm4_mes_map_process);
	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->adev, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (amdgpu_use_xgmi_p2p)
		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;

	/*
	 * For multi-partition capable GPUs, the KFD abstracts all partitions
	 * within a socket as xGMI connected in the topology so assign a unique
	 * hive id per device based on the pci device location if device is in
	 * PCIe mode.
	 */
	if (!kfd->hive_id && kfd->num_nodes > 1)
		kfd->hive_id = pci_dev_id(kfd->adev->pdev);

	kfd->noretry = kfd->adev->gmc.noretry;

	kfd_cwsr_init(kfd);

	dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
				kfd->num_nodes);

	/* Allocate the KFD nodes */
	for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
		node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
		if (!node)
			goto node_alloc_error;

		node->node_id = i;
		node->adev = kfd->adev;
		node->kfd = kfd;
		node->kfd2kgd = kfd->kfd2kgd;
		node->vm_info.vmid_num_kfd = vmid_num_kfd;
		node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
		/* TODO: Check if error handling is needed */
		if (node->xcp) {
			amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
						    &node->xcc_mask);
			++xcp_idx;
		} else {
			node->xcc_mask =
				(1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
		}

		if (node->xcp) {
			dev_info(kfd_device, "KFD node %d partition %d size %lldM\n",
				node->node_id, node->xcp->mem_id,
				KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
		}

		if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
		    kfd->num_nodes != 1) {
			/* For multi-partition capable GPUs and CPX mode, first
			 * XCD gets VMID range 4-9 and second XCD gets VMID
			 * range 10-15.
			 */

			node->vm_info.first_vmid_kfd = (i % 2 == 0) ?
						first_vmid_kfd :
						first_vmid_kfd + vmid_num_kfd;
			node->vm_info.last_vmid_kfd = (i % 2 == 0) ?
						last_vmid_kfd - vmid_num_kfd :
						last_vmid_kfd;
			node->compute_vmid_bitmap =
				((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
				((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
		} else {
			node->vm_info.first_vmid_kfd = first_vmid_kfd;
			node->vm_info.last_vmid_kfd = last_vmid_kfd;
			node->compute_vmid_bitmap =
				gpu_resources->compute_vmid_bitmap;
		}
		node->max_proc_per_quantum = max_proc_per_quantum;
		atomic_set(&node->sram_ecc_flag, 0);

		amdgpu_amdkfd_get_local_mem_info(kfd->adev,
					&node->local_mem_info, node->xcp);

		if (kfd->adev->xcp_mgr)
			kfd_setup_interrupt_bitmap(node, i);

		/* Initialize the KFD node */
		if (kfd_init_node(node)) {
			dev_err(kfd_device, "Error initializing KFD node\n");
			goto node_init_error;
		}

		spin_lock_init(&node->watch_points_lock);

		kfd->nodes[i] = node;
	}

	svm_range_set_max_pages(kfd->adev);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
		 kfd->adev->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		node->dqm->sched_policy);

	goto out;

node_init_error:
node_alloc_error:
	kfd_cleanup_nodes(kfd, i);
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
alloc_gtt_mem_failure:
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->adev->pdev->vendor, kfd->adev->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		/* Cleanup KFD nodes */
		kfd_cleanup_nodes(kfd, kfd->num_nodes);
		/* Cleanup common/shared resources */
		kfd_doorbell_fini(kfd);
		ida_destroy(&kfd->doorbell_ida);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd,
		      struct amdgpu_reset_context *reset_context)
{
	struct kfd_node *node;
	int i;

	if (!kfd->init_complete)
		return 0;

	for (i = 0; i < kfd->num_nodes; i++) {
		node = kfd->nodes[i];
		kfd_smi_event_update_gpu_reset(node, false, reset_context);
	}

	kgd2kfd_suspend(kfd, true);

	for (i = 0; i < kfd->num_nodes; i++)
		kfd_signal_reset_event(kfd->nodes[i]);

	return 0;
}

/*
 * FIXME: KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;
	struct kfd_node *node;
	int i;

	if (!kfd->init_complete)
		return 0;

	for (i = 0; i < kfd->num_nodes; i++) {
		ret = kfd_resume(kfd->nodes[i]);
		if (ret)
			return ret;
	}

	mutex_lock(&kfd_processes_mutex);
	--kfd_locked;
	mutex_unlock(&kfd_processes_mutex);

	for (i = 0; i < kfd->num_nodes; i++) {
		node = kfd->nodes[i];
		atomic_set(&node->sram_ecc_flag, 0);
		kfd_smi_event_update_gpu_reset(node, true, NULL);
	}

	return 0;
}

bool kfd_is_locked(struct kfd_dev *kfd)
{
	uint8_t id = 0;
	struct kfd_node *dev;

	lockdep_assert_held(&kfd_processes_mutex);

	/* check reset/suspend lock */
	if (kfd_locked > 0)
		return true;

	if (kfd)
		return kfd->kfd_dev_lock > 0;

	/* check lock on all cgroup accessible devices */
	while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) {
		if (!dev || kfd_devcgroup_check_permission(dev))
			continue;

		if (dev->kfd->kfd_dev_lock > 0)
			return true;
	}

	return false;
}

void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
	struct kfd_node *node;
	int i;

	if (!kfd->init_complete)
		return;

	if (suspend_proc)
		kgd2kfd_suspend_process(kfd);

	for (i = 0; i < kfd->num_nodes; i++) {
		node = kfd->nodes[i];
		node->dqm->ops.stop(node->dqm);
	}
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
{
	int ret, i;

	if (!kfd->init_complete)
		return 0;

	for (i = 0; i < kfd->num_nodes; i++) {
		ret = kfd_resume(kfd->nodes[i]);
		if (ret)
			return ret;
	}

	if (resume_proc)
		ret = kgd2kfd_resume_process(kfd);

	return ret;
}

1058 
1059 void kgd2kfd_suspend_process(struct kfd_dev *kfd)
1060 {
1061 	if (!kfd->init_complete)
1062 		return;
1063 
1064 	mutex_lock(&kfd_processes_mutex);
1065 	/* For first KFD device suspend all the KFD processes */
1066 	if (++kfd_locked == 1)
1067 		kfd_suspend_all_processes();
1068 	mutex_unlock(&kfd_processes_mutex);
1069 }
1070 
1071 int kgd2kfd_resume_process(struct kfd_dev *kfd)
1072 {
1073 	int ret = 0;
1074 
1075 	if (!kfd->init_complete)
1076 		return 0;
1077 
1078 	mutex_lock(&kfd_processes_mutex);
1079 	if (--kfd_locked == 0)
1080 		ret = kfd_resume_all_processes();
1081 	WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
1082 	mutex_unlock(&kfd_processes_mutex);
1083 
1084 	return ret;
1085 }
1086 
1087 static int kfd_resume(struct kfd_node *node)
1088 {
1089 	int err = 0;
1090 
1091 	err = node->dqm->ops.start(node->dqm);
1092 	if (err)
1093 		dev_err(kfd_device,
1094 			"Error starting queue manager for device %x:%x\n",
1095 			node->adev->pdev->vendor, node->adev->pdev->device);
1096 
1097 	return err;
1098 }
1099 
1100 /* This is called directly from KGD at ISR. */
1101 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1102 {
1103 	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
1104 	bool is_patched = false;
1105 	unsigned long flags;
1106 	struct kfd_node *node;
1107 
1108 	if (!kfd->init_complete)
1109 		return;
1110 
1111 	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
1112 		dev_err_once(kfd_device, "Ring entry too small\n");
1113 		return;
1114 	}
1115 
1116 	for (i = 0; i < kfd->num_nodes; i++) {
		/* Guard against a race with another thread that may be
		 * between kfd_cleanup_nodes() and kfree(kfd), when
		 * kfd->nodes[i] has already been set to NULL.
		 */
		if (kfd->nodes[i])
			node = kfd->nodes[i];
		else
			return;

		spin_lock_irqsave(&node->interrupt_lock, flags);

		if (node->interrupts_active
		    && interrupt_is_wanted(node, ih_ring_entry,
					   patched_ihre, &is_patched)
		    && enqueue_ih_ring_entry(node,
					     is_patched ? patched_ihre : ih_ring_entry)) {
			queue_work(node->kfd->ih_wq, &node->interrupt_work);
			spin_unlock_irqrestore(&node->interrupt_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&node->interrupt_lock, flags);
	}
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p, trigger);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/**
 * kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
	     p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
					   GFP_KERNEL);
	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	bitmap_free(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

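/*
 * First-fit search over the GTT sub-allocator bitmap: find a run of free
 * chunks large enough for the requested size, restarting the scan whenever
 * the run is broken by an already-allocated chunk.
 */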
int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;
	struct kfd_dev *kfd = node->kfd;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		__set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, then we need to
		 * restart our search from the last free chunk we found
		 * (which wasn't contiguous to the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
{
	struct kfd_dev *kfd = node->kfd;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
		     mem_obj->range_end - mem_obj->range_start + 1);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	/*
	 * TODO: Currently update SRAM ECC flag for first node.
	 * This needs to be updated later when we can
	 * identify SRAM ECC error on other nodes also.
	 */
	if (kfd)
		atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
}

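/*
 * compute_profile counts active compute contexts: the idle power profile is
 * left on the first user and restored when the count drops back to zero.
 */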
void kfd_inc_compute_active(struct kfd_node *node)
{
	if (atomic_inc_return(&node->kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(node->adev, false);
}

void kfd_dec_compute_active(struct kfd_node *node)
{
	int count = atomic_dec_return(&node->kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(node->adev, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

static bool kfd_compute_active(struct kfd_node *node)
{
	if (atomic_read(&node->kfd->compute_profile))
		return true;
	return false;
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
	/*
	 * TODO: For now, raise the throttling event only on first node.
	 * This will need to change after we are able to determine
	 * which node raised the throttling event.
	 */
	if (kfd && kfd->init_complete)
		kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
							throttle_bitmask);
}

/* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines
 * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
 * When the device has more than two engines, we reserve two for PCIe to enable
 * full-duplex and the rest are used as XGMI.
 */
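/*
 * Illustrative example: a single-node device with 8 SDMA instances and XGMI
 * support gets 2 PCIe-optimized engines and 6 XGMI engines; without XGMI,
 * all 8 count as PCIe engines.
 */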
unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
{
	/* If XGMI is not supported, all SDMA engines are PCIe */
	if (!node->adev->gmc.xgmi.supported)
		return node->adev->sdma.num_instances / (int)node->kfd->num_nodes;

	return min(node->adev->sdma.num_instances / (int)node->kfd->num_nodes, 2);
}

unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
{
	/* After reserving engines for PCIe, the rest are used as XGMI */
	return node->adev->sdma.num_instances / (int)node->kfd->num_nodes -
		kfd_get_num_sdma_engines(node);
}

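/*
 * Lock the KFD device, e.g. ahead of a partition mode switch: fails with
 * -EBUSY while any KFD process still references this device or while the
 * device is locked for reset/suspend.
 */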
int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
	struct kfd_process *p;
	int r = 0, temp, idx;

	mutex_lock(&kfd_processes_mutex);

	/* kfd_processes_count is per kfd_dev; return -EBUSY without
	 * further checks
	 */
	if (atomic_read(&kfd->kfd_processes_count)) {
		pr_debug("process_wq_release not finished\n");
		r = -EBUSY;
		goto out;
	}

	if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
		goto out;

	/* Fail under system reset/resume, or while the kfd device is switching partitions. */
	if (kfd_is_locked(kfd)) {
		r = -EBUSY;
		goto out;
	}

	/*
	 * Ensure all running processes are cgroup-excluded from the device
	 * before the mode switch, i.e. no pdd was created on this socket by
	 * any process.
	 */
	idx = srcu_read_lock(&kfd_processes_srcu);
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		int i;

		for (i = 0; i < p->n_pdds; i++) {
			if (p->pdds[i]->dev->kfd != kfd)
				continue;

			r = -EBUSY;
			goto proc_check_unlock;
		}
	}

proc_check_unlock:
	srcu_read_unlock(&kfd_processes_srcu, idx);
out:
	if (!r)
		++kfd->kfd_dev_lock;
	mutex_unlock(&kfd_processes_mutex);

	return r;
}

void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
	mutex_lock(&kfd_processes_mutex);
	--kfd->kfd_dev_lock;
	mutex_unlock(&kfd_processes_mutex);
}

int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
{
	struct kfd_node *node;
	int ret;

	if (!kfd->init_complete)
		return 0;

	if (node_id >= kfd->num_nodes) {
		dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
			 node_id, kfd->num_nodes - 1);
		return -EINVAL;
	}
	node = kfd->nodes[node_id];

	ret = node->dqm->ops.unhalt(node->dqm);
	if (ret)
		dev_err(kfd_device, "Error in starting scheduler\n");

	return ret;
}

int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
{
	struct kfd_node *node;
	int i, r;

	if (!kfd->init_complete)
		return 0;

	for (i = 0; i < kfd->num_nodes; i++) {
		node = kfd->nodes[i];
		r = node->dqm->ops.unhalt(node->dqm);
		if (r) {
			dev_err(kfd_device, "Error in starting scheduler\n");
			return r;
		}
	}
	return 0;
}

int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
	struct kfd_node *node;

	if (!kfd->init_complete)
		return 0;

	if (node_id >= kfd->num_nodes) {
		dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
			 node_id, kfd->num_nodes - 1);
		return -EINVAL;
	}

	node = kfd->nodes[node_id];
	return node->dqm->ops.halt(node->dqm);
}

int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
{
	struct kfd_node *node;
	int i, r;

	if (!kfd->init_complete)
		return 0;

	for (i = 0; i < kfd->num_nodes; i++) {
		node = kfd->nodes[i];
		r = node->dqm->ops.halt(node->dqm);
		if (r)
			return r;
	}
	return 0;
}

bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
	struct kfd_node *node;

	if (!kfd->init_complete)
		return false;

	if (node_id >= kfd->num_nodes) {
		dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
			 node_id, kfd->num_nodes - 1);
		return false;
	}

	node = kfd->nodes[node_id];

	return kfd_compute_active(node);
}


/**
 * kgd2kfd_vmfault_fast_path() - KFD vm page fault interrupt handling fast path for gmc v9
 * @adev: amdgpu device
 * @entry: vm fault interrupt vector
 * @retry_fault: if this is retry fault
 *
 * retry fault -
 *    with CAM enabled, adev primary ring
 *                           |  gmc_v9_0_process_interrupt()
 *                      adev soft_ring
 *                           |  gmc_v9_0_process_interrupt() worker failed to recover page fault
 *                      KFD node ih_fifo
 *                           |  KFD interrupt_wq worker
 *                      kfd_signal_vm_fault_event
 *
 *    without CAM,      adev primary ring1
 *                           |  gmc_v9_0_process_interrupt worker failed to recover page fault
 *                      KFD node ih_fifo
 *                           |  KFD interrupt_wq worker
 *                      kfd_signal_vm_fault_event
 *
 * no-retry fault -
 *                      adev primary ring
 *                           |  gmc_v9_0_process_interrupt()
 *                      KFD node ih_fifo
 *                           |  KFD interrupt_wq worker
 *                      kfd_signal_vm_fault_event
 *
 * fast path - After kfd_signal_vm_fault_event, gmc_v9_0_process_interrupt drops page faults
 *            from the same process and does not copy the interrupt to the KFD node ih_fifo.
 *            With the gdb debugger enabled, retry faults must be converted to no-retry
 *            faults for the debugger, so the fast path cannot be used.
 *
 * Return:
 *   true - use the fast path to handle this fault
 *   false - use normal path to handle it
 */
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
			       bool retry_fault)
{
	struct kfd_process *p;
	u32 cam_index;

	if (entry->ih == &adev->irq.ih_soft || entry->ih == &adev->irq.ih1) {
		p = kfd_lookup_process_by_pasid(entry->pasid, NULL);
		if (!p)
			return true;

		if (p->gpu_page_fault && !p->debug_trap_enabled) {
			if (retry_fault && adev->irq.retry_cam_enabled) {
				cam_index = entry->src_data[2] & 0x3ff;
				WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
			}

			kfd_unref_process(p);
			return true;
		}

		/*
		 * This is the first page fault, set flag and then signal user space
		 */
		p->gpu_page_fault = true;
		kfd_unref_process(p);
	}
	return false;
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to the HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state
 */
int kfd_debugfs_hang_hws(struct kfd_node *dev)
{
	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	if (dev->kfd->shared_resources.enable_mes) {
		dev_err(dev->adev->dev, "Inducing MES hang is not supported\n");
		return -EINVAL;
	}

	return dqm_debugfs_hang_hws(dev->dqm);
}

#endif