xref: /linux/drivers/gpu/drm/amd/amdkfd/kfd_device.c (revision 75372d75a4e23783583998ed99d5009d555850da)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/bsearch.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include "kfd_priv.h"
28 #include "kfd_device_queue_manager.h"
29 #include "kfd_pm4_headers_vi.h"
30 #include "kfd_pm4_headers_aldebaran.h"
31 #include "cwsr_trap_handler.h"
32 #include "amdgpu_amdkfd.h"
33 #include "kfd_smi_events.h"
34 #include "kfd_svm.h"
35 #include "kfd_migrate.h"
36 #include "amdgpu.h"
37 #include "amdgpu_xcp.h"
38 
39 #define MQD_SIZE_ALIGNED 768
40 
41 /*
42  * kfd_locked is used to lock the kfd driver during suspend or reset.
43  * Once locked, the kfd driver will stop any further GPU execution.
44  * Process creation (open) will return -EAGAIN.
45  */
46 static int kfd_locked;
47 
48 #ifdef CONFIG_DRM_AMDGPU_CIK
49 extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
50 #endif
51 extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
52 extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
53 extern const struct kfd2kgd_calls arcturus_kfd2kgd;
54 extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
55 extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
56 extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
57 extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
58 extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
59 extern const struct kfd2kgd_calls gfx_v12_kfd2kgd;
60 extern const struct kfd2kgd_calls gfx_v12_1_kfd2kgd;
61 
62 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
63 				unsigned int chunk_size);
64 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
65 
66 static int kfd_resume(struct kfd_node *kfd);
67 
68 static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
69 {
70 	uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0);
71 
72 	switch (sdma_version) {
73 	case IP_VERSION(4, 0, 0):/* VEGA10 */
74 	case IP_VERSION(4, 0, 1):/* VEGA12 */
75 	case IP_VERSION(4, 1, 0):/* RAVEN */
76 	case IP_VERSION(4, 1, 1):/* RAVEN */
77 	case IP_VERSION(4, 1, 2):/* RENOIR */
78 	case IP_VERSION(5, 2, 1):/* VANGOGH */
79 	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
80 	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
81 	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
82 		kfd->device_info.num_sdma_queues_per_engine = 2;
83 		break;
84 	case IP_VERSION(4, 2, 0):/* VEGA20 */
85 	case IP_VERSION(4, 2, 2):/* ARCTURUS */
86 	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
87 	case IP_VERSION(4, 4, 2):
88 	case IP_VERSION(4, 4, 4):
89 	case IP_VERSION(4, 4, 5):
90 	case IP_VERSION(5, 0, 0):/* NAVI10 */
91 	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
92 	case IP_VERSION(5, 0, 2):/* NAVI14 */
93 	case IP_VERSION(5, 0, 5):/* NAVI12 */
94 	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
95 	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
96 	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
97 	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
98 		kfd->device_info.num_sdma_queues_per_engine = 8;
99 		break;
100 	case IP_VERSION(6, 0, 0):
101 	case IP_VERSION(6, 0, 1):
102 	case IP_VERSION(6, 0, 2):
103 	case IP_VERSION(6, 0, 3):
104 	case IP_VERSION(6, 1, 0):
105 	case IP_VERSION(6, 1, 1):
106 	case IP_VERSION(6, 1, 2):
107 	case IP_VERSION(6, 1, 3):
108 	case IP_VERSION(6, 1, 4):
109 	case IP_VERSION(7, 0, 0):
110 	case IP_VERSION(7, 0, 1):
111 	case IP_VERSION(7, 1, 0):
112 		kfd->device_info.num_sdma_queues_per_engine = 8;
113 		/* Reserve 1 for paging and 1 for gfx */
114 		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
115 		break;
116 	default:
117 		dev_warn(kfd_device,
118 			"Defaulting to 8 SDMA queues per engine for unknown SDMA IP version (SDMA_HWIP: 0x%x)\n",
119 			sdma_version);
120 		kfd->device_info.num_sdma_queues_per_engine = 8;
121 	}
122 }
123 
124 static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
125 {
126 	uint32_t gc_version = KFD_GC_VERSION(kfd);
127 
128 	switch (gc_version) {
129 	case IP_VERSION(9, 0, 1): /* VEGA10 */
130 	case IP_VERSION(9, 1, 0): /* RAVEN */
131 	case IP_VERSION(9, 2, 1): /* VEGA12 */
132 	case IP_VERSION(9, 2, 2): /* RAVEN */
133 	case IP_VERSION(9, 3, 0): /* RENOIR */
134 	case IP_VERSION(9, 4, 0): /* VEGA20 */
135 	case IP_VERSION(9, 4, 1): /* ARCTURUS */
136 	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
137 		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
138 		break;
139 	case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
140 	case IP_VERSION(9, 4, 4): /* GC 9.4.4 */
141 	case IP_VERSION(9, 5, 0): /* GC 9.5.0 */
142 		kfd->device_info.event_interrupt_class =
143 						&event_interrupt_class_v9_4_3;
144 		break;
145 	case IP_VERSION(10, 3, 1): /* VANGOGH */
146 	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
147 	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
148 	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
149 	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
150 	case IP_VERSION(10, 1, 4):
151 	case IP_VERSION(10, 1, 10): /* NAVI10 */
152 	case IP_VERSION(10, 1, 2): /* NAVI12 */
153 	case IP_VERSION(10, 1, 1): /* NAVI14 */
154 	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
155 	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
156 	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
157 	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
158 		kfd->device_info.event_interrupt_class = &event_interrupt_class_v10;
159 		break;
160 	case IP_VERSION(11, 0, 0):
161 	case IP_VERSION(11, 0, 1):
162 	case IP_VERSION(11, 0, 2):
163 	case IP_VERSION(11, 0, 3):
164 	case IP_VERSION(11, 0, 4):
165 	case IP_VERSION(11, 5, 0):
166 	case IP_VERSION(11, 5, 1):
167 	case IP_VERSION(11, 5, 2):
168 	case IP_VERSION(11, 5, 3):
169 	case IP_VERSION(11, 5, 4):
170 		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
171 		break;
172 	case IP_VERSION(12, 0, 0):
173 	case IP_VERSION(12, 0, 1):
174 		/* GFX12_TODO: Change to v12 version. */
175 		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
176 		break;
177 	case IP_VERSION(12, 1, 0):
178 		kfd->device_info.event_interrupt_class =
179 						&event_interrupt_class_v12_1;
180 		break;
181 	default:
182 		dev_warn(kfd_device,
183 			 "Defaulting to the v9 event interrupt handler for unknown GC IP version (GC_HWIP: 0x%x)\n", gc_version);
184 		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
185 	}
186 }
187 
188 static void kfd_device_info_init(struct kfd_dev *kfd,
189 				 bool vf, uint32_t gfx_target_version)
190 {
191 	uint32_t gc_version = KFD_GC_VERSION(kfd);
192 	uint32_t asic_type = kfd->adev->asic_type;
193 
194 	kfd->device_info.max_pasid_bits = 16;
195 	kfd->device_info.max_no_of_hqd = 24;
196 	kfd->device_info.num_of_watch_points = 4;
197 	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
198 	kfd->device_info.gfx_target_version = gfx_target_version;
199 
200 	if (KFD_IS_SOC15(kfd)) {
201 		kfd->device_info.doorbell_size = 8;
202 		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
203 		kfd->device_info.supports_cwsr = true;
204 
205 		kfd_device_info_set_sdma_info(kfd);
206 
207 		kfd_device_info_set_event_interrupt_class(kfd);
208 
209 		if (gc_version < IP_VERSION(11, 0, 0)) {
210 			/* Navi2x+, Navi1x+ */
211 			if (gc_version == IP_VERSION(10, 3, 6))
212 				kfd->device_info.no_atomic_fw_version = 14;
213 			else if (gc_version == IP_VERSION(10, 3, 7))
214 				kfd->device_info.no_atomic_fw_version = 3;
215 			else if (gc_version >= IP_VERSION(10, 3, 0))
216 				kfd->device_info.no_atomic_fw_version = 92;
217 			else if (gc_version >= IP_VERSION(10, 1, 1))
218 				kfd->device_info.no_atomic_fw_version = 145;
219 
220 			/* Navi1x+ */
221 			if (gc_version >= IP_VERSION(10, 1, 1))
222 				kfd->device_info.needs_pci_atomics = true;
223 		} else if (gc_version < IP_VERSION(12, 0, 0)) {
224 			/*
225 			 * PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
226 			 * MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
227 			 * PCIe atomics support.
228 			 */
229 			kfd->device_info.needs_pci_atomics = true;
230 			kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
231 		} else if (gc_version < IP_VERSION(13, 0, 0)) {
232 			kfd->device_info.needs_pci_atomics = true;
233 			kfd->device_info.no_atomic_fw_version = 2090;
234 		} else {
235 			kfd->device_info.needs_pci_atomics = true;
236 		}
237 	} else {
238 		kfd->device_info.doorbell_size = 4;
239 		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
240 		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
241 		kfd->device_info.num_sdma_queues_per_engine = 2;
242 
243 		if (asic_type != CHIP_KAVERI &&
244 		    asic_type != CHIP_HAWAII &&
245 		    asic_type != CHIP_TONGA)
246 			kfd->device_info.supports_cwsr = true;
247 
248 		if (asic_type != CHIP_HAWAII && !vf)
249 			kfd->device_info.needs_pci_atomics = true;
250 	}
251 }
252 
253 struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
254 {
255 	struct kfd_dev *kfd = NULL;
256 	const struct kfd2kgd_calls *f2g = NULL;
257 	uint32_t gfx_target_version = 0;
258 
259 	switch (adev->asic_type) {
260 #ifdef CONFIG_DRM_AMDGPU_CIK
261 	case CHIP_KAVERI:
262 		gfx_target_version = 70000;
263 		if (!vf)
264 			f2g = &gfx_v7_kfd2kgd;
265 		break;
266 #endif
267 	case CHIP_CARRIZO:
268 		gfx_target_version = 80001;
269 		if (!vf)
270 			f2g = &gfx_v8_kfd2kgd;
271 		break;
272 #ifdef CONFIG_DRM_AMDGPU_CIK
273 	case CHIP_HAWAII:
274 		gfx_target_version = 70001;
275 		if (!amdgpu_exp_hw_support)
276 			pr_info("KFD support on Hawaii is experimental. See modparam exp_hw_support\n");
279 		else if (!vf)
280 			f2g = &gfx_v7_kfd2kgd;
281 		break;
282 #endif
283 	case CHIP_TONGA:
284 		gfx_target_version = 80002;
285 		if (!vf)
286 			f2g = &gfx_v8_kfd2kgd;
287 		break;
288 	case CHIP_FIJI:
289 	case CHIP_POLARIS10:
290 		gfx_target_version = 80003;
291 		f2g = &gfx_v8_kfd2kgd;
292 		break;
293 	case CHIP_POLARIS11:
294 	case CHIP_POLARIS12:
295 	case CHIP_VEGAM:
296 		gfx_target_version = 80003;
297 		if (!vf)
298 			f2g = &gfx_v8_kfd2kgd;
299 		break;
300 	default:
301 		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
302 		/* Vega 10 */
303 		case IP_VERSION(9, 0, 1):
304 			gfx_target_version = 90000;
305 			f2g = &gfx_v9_kfd2kgd;
306 			break;
307 		/* Raven */
308 		case IP_VERSION(9, 1, 0):
309 		case IP_VERSION(9, 2, 2):
310 			gfx_target_version = 90002;
311 			if (!vf)
312 				f2g = &gfx_v9_kfd2kgd;
313 			break;
314 		/* Vega12 */
315 		case IP_VERSION(9, 2, 1):
316 			gfx_target_version = 90004;
317 			if (!vf)
318 				f2g = &gfx_v9_kfd2kgd;
319 			break;
320 		/* Renoir */
321 		case IP_VERSION(9, 3, 0):
322 			gfx_target_version = 90012;
323 			if (!vf)
324 				f2g = &gfx_v9_kfd2kgd;
325 			break;
326 		/* Vega20 */
327 		case IP_VERSION(9, 4, 0):
328 			gfx_target_version = 90006;
329 			if (!vf)
330 				f2g = &gfx_v9_kfd2kgd;
331 			break;
332 		/* Arcturus */
333 		case IP_VERSION(9, 4, 1):
334 			gfx_target_version = 90008;
335 			f2g = &arcturus_kfd2kgd;
336 			break;
337 		/* Aldebaran */
338 		case IP_VERSION(9, 4, 2):
339 			gfx_target_version = 90010;
340 			f2g = &aldebaran_kfd2kgd;
341 			break;
342 		case IP_VERSION(9, 4, 3):
343 		case IP_VERSION(9, 4, 4):
344 			gfx_target_version = 90402;
345 			f2g = &gc_9_4_3_kfd2kgd;
346 			break;
347 		case IP_VERSION(9, 5, 0):
348 			gfx_target_version = 90500;
349 			f2g = &gc_9_4_3_kfd2kgd;
350 			break;
351 		/* Navi10 */
352 		case IP_VERSION(10, 1, 10):
353 			gfx_target_version = 100100;
354 			if (!vf)
355 				f2g = &gfx_v10_kfd2kgd;
356 			break;
357 		/* Navi12 */
358 		case IP_VERSION(10, 1, 2):
359 			gfx_target_version = 100101;
360 			f2g = &gfx_v10_kfd2kgd;
361 			break;
362 		/* Navi14 */
363 		case IP_VERSION(10, 1, 1):
364 			gfx_target_version = 100102;
365 			if (!vf)
366 				f2g = &gfx_v10_kfd2kgd;
367 			break;
368 		/* Cyan Skillfish */
369 		case IP_VERSION(10, 1, 3):
370 		case IP_VERSION(10, 1, 4):
371 			gfx_target_version = 100103;
372 			if (!vf)
373 				f2g = &gfx_v10_kfd2kgd;
374 			break;
375 		/* Sienna Cichlid */
376 		case IP_VERSION(10, 3, 0):
377 			gfx_target_version = 100300;
378 			f2g = &gfx_v10_3_kfd2kgd;
379 			break;
380 		/* Navy Flounder */
381 		case IP_VERSION(10, 3, 2):
382 			gfx_target_version = 100301;
383 			f2g = &gfx_v10_3_kfd2kgd;
384 			break;
385 		/* Van Gogh */
386 		case IP_VERSION(10, 3, 1):
387 			gfx_target_version = 100303;
388 			if (!vf)
389 				f2g = &gfx_v10_3_kfd2kgd;
390 			break;
391 		/* Dimgrey Cavefish */
392 		case IP_VERSION(10, 3, 4):
393 			gfx_target_version = 100302;
394 			f2g = &gfx_v10_3_kfd2kgd;
395 			break;
396 		/* Beige Goby */
397 		case IP_VERSION(10, 3, 5):
398 			gfx_target_version = 100304;
399 			f2g = &gfx_v10_3_kfd2kgd;
400 			break;
401 		/* Yellow Carp */
402 		case IP_VERSION(10, 3, 3):
403 			gfx_target_version = 100305;
404 			if (!vf)
405 				f2g = &gfx_v10_3_kfd2kgd;
406 			break;
407 		case IP_VERSION(10, 3, 6):
408 		case IP_VERSION(10, 3, 7):
409 			gfx_target_version = 100306;
410 			if (!vf)
411 				f2g = &gfx_v10_3_kfd2kgd;
412 			break;
413 		case IP_VERSION(11, 0, 0):
414 			gfx_target_version = 110000;
415 			f2g = &gfx_v11_kfd2kgd;
416 			break;
417 		case IP_VERSION(11, 0, 1):
418 		case IP_VERSION(11, 0, 4):
419 			gfx_target_version = 110003;
420 			f2g = &gfx_v11_kfd2kgd;
421 			break;
422 		case IP_VERSION(11, 0, 2):
423 			gfx_target_version = 110002;
424 			f2g = &gfx_v11_kfd2kgd;
425 			break;
426 		case IP_VERSION(11, 0, 3):
427 			/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
428 			gfx_target_version = 110001;
429 			f2g = &gfx_v11_kfd2kgd;
430 			break;
431 		case IP_VERSION(11, 5, 0):
432 			gfx_target_version = 110500;
433 			f2g = &gfx_v11_kfd2kgd;
434 			break;
435 		case IP_VERSION(11, 5, 1):
436 			gfx_target_version = 110501;
437 			f2g = &gfx_v11_kfd2kgd;
438 			break;
439 		case IP_VERSION(11, 5, 2):
440 			gfx_target_version = 110502;
441 			f2g = &gfx_v11_kfd2kgd;
442 			break;
443 		case IP_VERSION(11, 5, 3):
444 			gfx_target_version = 110503;
445 			f2g = &gfx_v11_kfd2kgd;
446 			break;
447 		case IP_VERSION(11, 5, 4):
448 			gfx_target_version = 110504;
449 			f2g = &gfx_v11_kfd2kgd;
450 			break;
451 		case IP_VERSION(12, 0, 0):
452 			gfx_target_version = 120000;
453 			f2g = &gfx_v12_kfd2kgd;
454 			break;
455 		case IP_VERSION(12, 0, 1):
456 			gfx_target_version = 120001;
457 			f2g = &gfx_v12_kfd2kgd;
458 			break;
459 		case IP_VERSION(12, 1, 0):
460 			gfx_target_version = 120500;
461 			f2g = &gfx_v12_1_kfd2kgd;
462 			break;
463 		default:
464 			break;
465 		}
466 		break;
467 	}
468 
469 	if (!f2g) {
470 		if (amdgpu_ip_version(adev, GC_HWIP, 0))
471 			dev_info(kfd_device,
472 				"GC IP %06x %s not supported in kfd\n",
473 				amdgpu_ip_version(adev, GC_HWIP, 0),
474 				vf ? "VF" : "");
475 		else
476 			dev_info(kfd_device, "%s %s not supported in kfd\n",
477 				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
478 		return NULL;
479 	}
480 
481 	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
482 	if (!kfd)
483 		return NULL;
484 
485 	kfd->adev = adev;
486 	kfd_device_info_init(kfd, vf, gfx_target_version);
487 	kfd->init_complete = false;
488 	kfd->kfd2kgd = f2g;
489 	atomic_set(&kfd->compute_profile, 0);
490 
491 	mutex_init(&kfd->doorbell_mutex);
492 
493 	ida_init(&kfd->doorbell_ida);
494 	atomic_set(&kfd->kfd_processes_count, 0);
495 
496 	return kfd;
497 }
498 
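/*
 * Select the CWSR trap handler binary matching the GC version. The
 * BUILD_BUG_ONs below guard the assumed layout of the per-queue CWSR
 * area: the trap handler ISA is placed at offset 0 and must not overlap
 * the TMA at KFD_CWSR_TMA_OFFSET (or spill past the page, where noted).
 */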
499 static void kfd_cwsr_init(struct kfd_dev *kfd)
500 {
501 	if (cwsr_enable && kfd->device_info.supports_cwsr) {
502 		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
503 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex)
504 					     > KFD_CWSR_TMA_OFFSET);
505 			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
506 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
507 		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
508 			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex)
509 					     > KFD_CWSR_TMA_OFFSET);
510 			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
511 			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
512 		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
513 			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex)
514 					     > KFD_CWSR_TMA_OFFSET);
515 			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
516 			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
517 		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
518 			   KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) {
519 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex)
520 					     > KFD_CWSR_TMA_OFFSET);
521 			kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
522 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
523 		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 5, 0)) {
524 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_5_0_hex) > PAGE_SIZE);
525 			kfd->cwsr_isa = cwsr_trap_gfx9_5_0_hex;
526 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_5_0_hex);
527 		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
528 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex)
529 					     > KFD_CWSR_TMA_OFFSET);
530 			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
531 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
532 		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
533 			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex)
534 					     > KFD_CWSR_TMA_OFFSET);
535 			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
536 			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
537 		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
538 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex)
539 					     > KFD_CWSR_TMA_OFFSET);
540 			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
541 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
542 		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(12, 0, 0)) {
543 			/* The gfx11 cwsr trap handler must fit inside a single page. */
545 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
546 			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
547 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
548 		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(12, 1, 0)) {
549 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex)
550 					     > KFD_CWSR_TMA_OFFSET);
551 			kfd->cwsr_isa = cwsr_trap_gfx12_hex;
552 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex);
553 		} else {
554 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_1_0_hex)
555 					     > KFD_CWSR_TMA_OFFSET);
556 			kfd->cwsr_isa = cwsr_trap_gfx12_1_0_hex;
557 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_1_0_hex);
558 		}
559 
560 		kfd->cwsr_enabled = true;
561 	}
562 }
563 
564 static int kfd_gws_init(struct kfd_node *node)
565 {
566 	int ret = 0;
567 	struct kfd_dev *kfd = node->kfd;
568 	uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
569 
570 	if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
571 		return 0;
572 
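	/*
	 * GWS is usable under HWS either when forced via the hws_gws_support
	 * modparam or when the firmware is new enough: the per-ASIC checks
	 * below encode the minimum MEC2 firmware (or MES scheduler, for
	 * GFX11) revisions, while GC 9.4.3+ and GFX12+ need no firmware
	 * version check.
	 */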
573 	if (hws_gws_support || (KFD_IS_SOC15(node) &&
574 		((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
575 			&& kfd->mec2_fw_version >= 0x81b3) ||
576 		(KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
577 			&& kfd->mec2_fw_version >= 0x1b3)  ||
578 		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
579 			&& kfd->mec2_fw_version >= 0x30)   ||
580 		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
581 			&& kfd->mec2_fw_version >= 0x28) ||
582 		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3) ||
583 		 KFD_GC_VERSION(node) == IP_VERSION(9, 4, 4)) ||
584 		(KFD_GC_VERSION(node) == IP_VERSION(9, 5, 0)) ||
585 		(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
586 			&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
587 			&& kfd->mec2_fw_version >= 0x6b) ||
588 		(KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
589 			&& KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
590 			&& mes_rev >= 68) ||
591 		(KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))))) {
592 		if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
593 			node->adev->gds.gws_size = 64;
594 		ret = amdgpu_amdkfd_alloc_gws(node->adev,
595 				node->adev->gds.gws_size, &node->gws);
596 	}
597 
598 	return ret;
599 }
600 
601 static void kfd_smi_init(struct kfd_node *dev)
602 {
603 	INIT_LIST_HEAD(&dev->smi_clients);
604 	spin_lock_init(&dev->smi_lock);
605 }
606 
607 static int kfd_init_node(struct kfd_node *node)
608 {
609 	int err = -1;
610 
611 	if (kfd_interrupt_init(node)) {
612 		dev_err(kfd_device, "Error initializing interrupts\n");
613 		goto kfd_interrupt_error;
614 	}
615 
616 	node->dqm = device_queue_manager_init(node);
617 	if (!node->dqm) {
618 		dev_err(kfd_device, "Error initializing queue manager\n");
619 		goto device_queue_manager_error;
620 	}
621 
622 	if (kfd_gws_init(node)) {
623 		dev_err(kfd_device, "Could not allocate %d gws\n",
624 			node->adev->gds.gws_size);
625 		goto gws_error;
626 	}
627 
628 	if (kfd_resume(node))
629 		goto kfd_resume_error;
630 
631 	if (kfd_topology_add_device(node)) {
632 		dev_err(kfd_device, "Error adding device to topology\n");
633 		goto kfd_topology_add_device_error;
634 	}
635 
636 	kfd_smi_init(node);
637 
638 	return 0;
639 
640 kfd_topology_add_device_error:
641 kfd_resume_error:
642 gws_error:
643 	device_queue_manager_uninit(node->dqm);
644 device_queue_manager_error:
645 	kfd_interrupt_exit(node);
646 kfd_interrupt_error:
647 	if (node->gws)
648 		amdgpu_amdkfd_free_gws(node->adev, node->gws);
649 
650 	/* Cleanup the node memory here */
651 	kfree(node);
652 	return err;
653 }
654 
655 static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
656 {
657 	struct kfd_node *knode;
658 	unsigned int i;
659 
660 	/*
661 	 * flush_work ensures that there are no outstanding
662 	 * work-queue items that will access interrupt_ring. New work items
663 	 * can't be created because we stopped interrupt handling above.
664 	 */
665 	flush_workqueue(kfd->ih_wq);
666 	destroy_workqueue(kfd->ih_wq);
667 
668 	for (i = 0; i < num_nodes; i++) {
669 		knode = kfd->nodes[i];
670 		device_queue_manager_uninit(knode->dqm);
671 		kfd_interrupt_exit(knode);
672 		kfd_topology_remove_device(knode);
673 		if (knode->gws)
674 			amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
675 		kfree(knode);
676 		kfd->nodes[i] = NULL;
677 	}
678 }
679 
680 static void kfd_setup_interrupt_bitmap(struct kfd_node *node,
681 				       unsigned int kfd_node_idx)
682 {
683 	struct amdgpu_device *adev = node->adev;
684 	uint32_t xcc_mask = node->xcc_mask;
685 	uint32_t xcc, mapped_xcc;
686 	uint32_t bitmap;
687 	/*
688 	 * Interrupt bitmap is setup for processing interrupts from
689 	 * different XCDs and AIDs.
690 	 * Interrupt bitmap is defined as follows:
691 	 * 1. Bits 0-15 - correspond to the NodeId field.
692 	 *    Each bit corresponds to NodeId number. For example, if
693 	 *    a KFD node has interrupt bitmap set to 0x7, then this
694 	 *    KFD node will process interrupts with NodeId = 0, 1 and 2
695 	 *    in the IH cookie.
696 	 * 2. Bits 16-31 - unused.
697 	 *
698 	 * Please note that the kfd_node_idx argument passed to this
699 	 * function is not related to NodeId field received in the
700 	 * IH cookie.
701 	 *
702 	 * In CPX mode, a KFD node will process an interrupt if:
703 	 * - the Node Id matches the corresponding bit set in
704 	 *   Bits 0-15.
705 	 * - AND VMID reported in the interrupt lies within the
706 	 *   VMID range of the node.
707 	 */
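	/*
	 * Worked example (illustrative, derived from the encodings below):
	 * in the default (GC 9.4.3 style) case each XCC claims two NodeId
	 * bits, so mapped_xcc 0 and 1 yield bitmaps 0x3 (NodeIds 0, 1) and
	 * 0x5 (NodeIds 0, 2); the shared NodeId 0 is disambiguated by the
	 * VMID range check described above. On the second AID, mapped_xcc 2
	 * and 3 yield 0x30 and 0x50. For GC 12.1.0, mapped_xcc 0 yields
	 * 0x2 | (0x4 << 0) = 0x6 and mapped_xcc 1 yields 0x2 | (0x4 << 1) = 0xa.
	 */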
708 	switch (KFD_GC_VERSION(node)) {
709 	case IP_VERSION(12, 1, 0):
710 		for_each_inst(xcc, xcc_mask) {
711 			mapped_xcc = GET_INST(GC, xcc);
712 			bitmap = 0x2 | (0x4 << (mapped_xcc % 4));
713 			if (mapped_xcc / 4)
714 				bitmap <<= 8;
715 			node->interrupt_bitmap |= bitmap;
716 		}
717 		break;
718 	default:
719 		for_each_inst(xcc, xcc_mask) {
720 			mapped_xcc = GET_INST(GC, xcc);
721 			node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
722 		}
723 		break;
724 	}
725 	dev_info(kfd_device, "Node: %d, interrupt_bitmap: %x\n", kfd_node_idx,
726 							node->interrupt_bitmap);
727 }
728 
729 bool kgd2kfd_device_init(struct kfd_dev *kfd,
730 			 const struct kgd2kfd_shared_resources *gpu_resources)
731 {
732 	unsigned int size, map_process_packet_size, i;
733 	struct kfd_node *node;
734 	uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
735 	unsigned int max_proc_per_quantum;
736 	int partition_mode;
737 	int xcp_idx;
738 
739 	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
740 			KGD_ENGINE_MEC1);
741 	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
742 			KGD_ENGINE_MEC2);
743 	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
744 			KGD_ENGINE_SDMA1);
745 	kfd->shared_resources = *gpu_resources;
746 
747 	kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);
748 
749 	if (kfd->num_nodes == 0) {
750 		dev_err(kfd_device,
751 			"KFD num nodes cannot be 0, num_xcc_per_xcp: %d\n",
752 			kfd->adev->gfx.num_xcc_per_xcp);
753 		goto out;
754 	}
755 
756 	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
757 	 * 32 and 64-bit requests are possible and must be
758 	 * supported.
759 	 */
760 	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
761 	if (!kfd->pci_atomic_requested &&
762 	    kfd->device_info.needs_pci_atomics &&
763 	    (!kfd->device_info.no_atomic_fw_version ||
764 	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
765 		dev_info(kfd_device,
766 			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
767 			 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
768 			 kfd->mec_fw_version,
769 			 kfd->device_info.no_atomic_fw_version);
770 		return false;
771 	}
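
	/*
	 * Example of the check above (illustrative values): a GFX10.3 part
	 * advertises no_atomic_fw_version = 92, so on a platform whose PCIe
	 * root rejects atomics the probe fails only if the MEC firmware is
	 * older than revision 92; newer firmware copes without PCIe atomics
	 * and the device is still added.
	 */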
772 
773 	first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap) - 1;
774 	last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap) - 1;
775 	vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
776 
777 	/* For multi-partition capable GPUs, we need special handling for VMIDs
778 	 * depending on partition mode.
779 	 * In CPX mode, the VMID range needs to be shared between XCDs.
780 	 * Additionally, there are 13 VMIDs (3-15) available for KFD. To
781 	 * divide them equally, we change starting VMID to 4 and not use
782 	 * VMID 3.
783 	 * If the VMID range changes for multi-partition capable GPUs, then
784 	 * this code MUST be revisited.
785 	 */
786 	if (kfd->adev->xcp_mgr && (KFD_GC_VERSION(kfd) != IP_VERSION(12, 1, 0))) {
787 		partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
788 								 AMDGPU_XCP_FL_LOCKED);
789 		if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
790 		    kfd->num_nodes != 1) {
791 			vmid_num_kfd /= 2;
792 			first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd * 2;
793 		}
794 	}
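
	/*
	 * Worked example (illustrative): with compute_vmid_bitmap = 0xfff8
	 * (VMIDs 3-15), first_vmid_kfd = 3, last_vmid_kfd = 15 and
	 * vmid_num_kfd = 13. In CPX mode this becomes vmid_num_kfd = 6 and
	 * first_vmid_kfd = 16 - 12 = 4, so VMID 3 is left unused and the
	 * per-node split further down alternates between VMIDs 4-9
	 * (bitmap (1 << 10) - (1 << 4) = 0x3f0) and VMIDs 10-15.
	 */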
795 
796 	/* Verify module parameters regarding mapped process number */
797 	if (hws_max_conc_proc >= 0)
798 		max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
799 	else
800 		max_proc_per_quantum = vmid_num_kfd;
801 
802 	/* calculate max size of mqds needed for queues */
803 	size = max_num_of_queues_per_device *
804 			kfd->device_info.mqd_size_aligned;
805 
806 	/*
807 	 * calculate max size of runlist packet.
808 	 * There can be only 2 packets at once
809 	 */
810 	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
811 				sizeof(struct pm4_mes_map_process_aldebaran) :
812 				sizeof(struct pm4_mes_map_process);
813 	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
814 		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
815 		+ sizeof(struct pm4_mes_runlist)) * 2;
816 
817 	/* Add size of HIQ & DIQ */
818 	size += KFD_KERNEL_QUEUE_SIZE * 2;
819 
820 	/* add another 512KB for all other allocations on gart (HPD, fences) */
821 	size += 512 * 1024;
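
	/*
	 * Putting the above together (a sketch in the symbols used by this
	 * function, not concrete sizes):
	 *
	 *   size = Q * mqd_size_aligned
	 *        + 2 * (P * map_process_packet_size
	 *               + Q * sizeof(struct pm4_mes_map_queues)
	 *               + sizeof(struct pm4_mes_runlist))
	 *        + 2 * KFD_KERNEL_QUEUE_SIZE
	 *        + 512 KiB
	 *
	 * with Q = max_num_of_queues_per_device and
	 * P = KFD_MAX_NUM_OF_PROCESSES; the factor of 2 covers the two
	 * runlist packets that can be in flight at once.
	 */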
822 
823 	if (amdgpu_amdkfd_alloc_gtt_mem(
824 			kfd->adev, size, &kfd->gtt_mem,
825 			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
826 			false)) {
827 		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
828 		goto alloc_gtt_mem_failure;
829 	}
830 
831 	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
832 
833 	/* Initialize GTT sa with 512 byte chunk size */
834 	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
835 		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
836 		goto kfd_gtt_sa_init_error;
837 	}
838 
839 	if (kfd_doorbell_init(kfd)) {
840 		dev_err(kfd_device,
841 			"Error initializing doorbell aperture\n");
842 		goto kfd_doorbell_error;
843 	}
844 
845 	if (amdgpu_use_xgmi_p2p)
846 		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
847 
848 	/*
849 	 * For multi-partition capable GPUs, the KFD abstracts all partitions
850 	 * within a socket as xGMI connected in the topology so assign a unique
851 	 * hive id per device based on the pci device location if device is in
852 	 * PCIe mode.
853 	 */
854 	if (!kfd->hive_id && kfd->num_nodes > 1)
855 		kfd->hive_id = pci_dev_id(kfd->adev->pdev);
856 
857 	kfd->noretry = kfd->adev->gmc.noretry;
858 
859 	kfd_cwsr_init(kfd);
860 
861 	dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
862 				kfd->num_nodes);
863 
864 	/* Allocate the KFD nodes */
865 	for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
866 		node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
867 		if (!node)
868 			goto node_alloc_error;
869 
870 		node->node_id = i;
871 		node->adev = kfd->adev;
872 		node->kfd = kfd;
873 		node->kfd2kgd = kfd->kfd2kgd;
874 		node->vm_info.vmid_num_kfd = vmid_num_kfd;
875 		node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
876 		/* TODO: Check if error handling is needed */
877 		if (node->xcp) {
878 			amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
879 						    &node->xcc_mask);
880 			++xcp_idx;
881 		} else {
882 			node->xcc_mask =
883 				(1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
884 		}
885 
886 		if (node->xcp) {
887 			dev_info(kfd_device, "KFD node %d partition %d size %lldM\n",
888 				node->node_id, node->xcp->mem_id,
889 				KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
890 		}
891 
892 		if (kfd->num_nodes != 1 &&
893 		    KFD_GC_VERSION(kfd) != IP_VERSION(12, 1, 0) &&
894 		    partition_mode == AMDGPU_CPX_PARTITION_MODE) {
895 			/* For multi-partition capable GPUs and CPX mode, first
896 			 * XCD gets VMID range 4-9 and second XCD gets VMID
897 			 * range 10-15.
898 			 */
899 
900 			node->vm_info.first_vmid_kfd = (i % 2 == 0) ?
901 						first_vmid_kfd :
902 						first_vmid_kfd + vmid_num_kfd;
903 			node->vm_info.last_vmid_kfd = (i % 2 == 0) ?
904 						last_vmid_kfd - vmid_num_kfd :
905 						last_vmid_kfd;
906 			node->compute_vmid_bitmap =
907 				((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
908 				((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
909 		} else {
910 			node->vm_info.first_vmid_kfd = first_vmid_kfd;
911 			node->vm_info.last_vmid_kfd = last_vmid_kfd;
912 			node->compute_vmid_bitmap =
913 				gpu_resources->compute_vmid_bitmap;
914 		}
915 
916 		node->max_proc_per_quantum = max_proc_per_quantum;
917 		atomic_set(&node->sram_ecc_flag, 0);
918 
919 		amdgpu_amdkfd_get_local_mem_info(kfd->adev,
920 					&node->local_mem_info, node->xcp);
921 
922 		if (kfd->adev->xcp_mgr)
923 			kfd_setup_interrupt_bitmap(node, i);
924 
925 		/* Initialize the KFD node */
926 		if (kfd_init_node(node)) {
927 			dev_err(kfd_device, "Error initializing KFD node\n");
928 			goto node_init_error;
929 		}
930 
931 		spin_lock_init(&node->watch_points_lock);
932 
933 		kfd->nodes[i] = node;
934 	}
935 
936 	svm_range_set_max_pages(kfd->adev);
937 
938 	kfd->init_complete = true;
939 	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
940 		 kfd->adev->pdev->device);
941 
942 	pr_debug("Starting kfd with the following scheduling policy %d\n",
943 		node->dqm->sched_policy);
944 
945 	goto out;
946 
947 node_init_error:
948 node_alloc_error:
949 	kfd_cleanup_nodes(kfd, i);
950 	kfd_doorbell_fini(kfd);
951 kfd_doorbell_error:
952 	kfd_gtt_sa_fini(kfd);
953 kfd_gtt_sa_init_error:
954 	amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
955 alloc_gtt_mem_failure:
956 	dev_err(kfd_device,
957 		"device %x:%x NOT added due to errors\n",
958 		kfd->adev->pdev->vendor, kfd->adev->pdev->device);
959 out:
960 	return kfd->init_complete;
961 }
962 
963 void kgd2kfd_device_exit(struct kfd_dev *kfd)
964 {
965 	if (kfd->init_complete) {
966 		/* Cleanup KFD nodes */
967 		kfd_cleanup_nodes(kfd, kfd->num_nodes);
968 		/* Cleanup common/shared resources */
969 		kfd_doorbell_fini(kfd);
970 		ida_destroy(&kfd->doorbell_ida);
971 		kfd_gtt_sa_fini(kfd);
972 		amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
973 	}
974 
975 	kfree(kfd);
976 }
977 
978 int kgd2kfd_pre_reset(struct kfd_dev *kfd,
979 		      struct amdgpu_reset_context *reset_context)
980 {
981 	struct kfd_node *node;
982 	int i;
983 
984 	if (!kfd->init_complete)
985 		return 0;
986 
987 	for (i = 0; i < kfd->num_nodes; i++) {
988 		node = kfd->nodes[i];
989 		kfd_smi_event_update_gpu_reset(node, false, reset_context);
990 	}
991 
992 	kgd2kfd_suspend(kfd, true);
993 
994 	for (i = 0; i < kfd->num_nodes; i++)
995 		kfd_signal_reset_event(kfd->nodes[i]);
996 
997 	return 0;
998 }
999 
1000 /*
1001  * FIXME: KFD cannot resume existing processes for now.
1002  * All existing processes are kept in an evicted state and
1003  * we wait for them to terminate.
1004  */
1005 
1006 int kgd2kfd_post_reset(struct kfd_dev *kfd)
1007 {
1008 	int ret;
1009 	struct kfd_node *node;
1010 	int i;
1011 
1012 	if (!kfd->init_complete)
1013 		return 0;
1014 
1015 	for (i = 0; i < kfd->num_nodes; i++) {
1016 		ret = kfd_resume(kfd->nodes[i]);
1017 		if (ret)
1018 			return ret;
1019 	}
1020 
1021 	mutex_lock(&kfd_processes_mutex);
1022 	--kfd_locked;
1023 	mutex_unlock(&kfd_processes_mutex);
1024 
1025 	for (i = 0; i < kfd->num_nodes; i++) {
1026 		node = kfd->nodes[i];
1027 		atomic_set(&node->sram_ecc_flag, 0);
1028 		kfd_smi_event_update_gpu_reset(node, true, NULL);
1029 	}
1030 
1031 	return 0;
1032 }
1033 
1034 bool kfd_is_locked(struct kfd_dev *kfd)
1035 {
1036 	uint8_t id = 0;
1037 	struct kfd_node *dev;
1038 
1039 	lockdep_assert_held(&kfd_processes_mutex);
1040 
1041 	/* check reset/suspend lock */
1042 	if (kfd_locked > 0)
1043 		return true;
1044 
1045 	if (kfd)
1046 		return kfd->kfd_dev_lock > 0;
1047 
1048 	/* check lock on all cgroup accessible devices */
1049 	while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) {
1050 		if (!dev || kfd_devcgroup_check_permission(dev))
1051 			continue;
1052 
1053 		if (dev->kfd->kfd_dev_lock > 0)
1054 			return true;
1055 	}
1056 
1057 	return false;
1058 }
1059 
1060 void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
1061 {
1062 	struct kfd_node *node;
1063 	int i;
1064 
1065 	if (!kfd->init_complete)
1066 		return;
1067 
1068 	if (suspend_proc)
1069 		kgd2kfd_suspend_process(kfd);
1070 
1071 	for (i = 0; i < kfd->num_nodes; i++) {
1072 		node = kfd->nodes[i];
1073 		node->dqm->ops.stop(node->dqm);
1074 	}
1075 }
1076 
1077 int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
1078 {
1079 	int ret = 0, i;
1080 
1081 	if (!kfd->init_complete)
1082 		return 0;
1083 
1084 	for (i = 0; i < kfd->num_nodes; i++) {
1085 		ret = kfd_resume(kfd->nodes[i]);
1086 		if (ret)
1087 			return ret;
1088 	}
1089 
1090 	if (resume_proc)
1091 		ret = kgd2kfd_resume_process(kfd);
1092 
1093 	return ret;
1094 }
1095 
1096 void kgd2kfd_suspend_process(struct kfd_dev *kfd)
1097 {
1098 	if (!kfd->init_complete)
1099 		return;
1100 
1101 	mutex_lock(&kfd_processes_mutex);
1102 	/* For first KFD device suspend all the KFD processes */
1103 	if (++kfd_locked == 1)
1104 		kfd_suspend_all_processes();
1105 	mutex_unlock(&kfd_processes_mutex);
1106 }
1107 
1108 int kgd2kfd_resume_process(struct kfd_dev *kfd)
1109 {
1110 	int ret = 0;
1111 
1112 	if (!kfd->init_complete)
1113 		return 0;
1114 
1115 	mutex_lock(&kfd_processes_mutex);
1116 	if (--kfd_locked == 0)
1117 		ret = kfd_resume_all_processes();
1118 	WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
1119 	mutex_unlock(&kfd_processes_mutex);
1120 
1121 	return ret;
1122 }
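
/*
 * Usage note: kgd2kfd_suspend_process()/kgd2kfd_resume_process() are
 * refcounted through the global kfd_locked, so with two devices the
 * sequence suspend(A), suspend(B), resume(A), resume(B) evicts processes
 * only on the first suspend and restores them only on the last resume.
 */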
1123 
1124 static int kfd_resume(struct kfd_node *node)
1125 {
1126 	int err = 0;
1127 
1128 	err = node->dqm->ops.start(node->dqm);
1129 	if (err)
1130 		dev_err(kfd_device,
1131 			"Error starting queue manager for device %x:%x\n",
1132 			node->adev->pdev->vendor, node->adev->pdev->device);
1133 
1134 	return err;
1135 }
1136 
1137 /* This is called directly from KGD at ISR. */
1138 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1139 {
1140 	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
1141 	bool is_patched = false;
1142 	unsigned long flags;
1143 	struct kfd_node *node;
1144 
1145 	if (!kfd->init_complete)
1146 		return;
1147 
1148 	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
1149 		dev_err_once(kfd_device, "Ring entry too small\n");
1150 		return;
1151 	}
1152 
1153 	for (i = 0; i < kfd->num_nodes; i++) {
1154 		/*
1155 		 * Race with another thread between kfd_cleanup_nodes() and
1156 		 * kfree(kfd): kfd->nodes[i] may already be NULL.
1157 		 */
1158 		node = kfd->nodes[i];
1159 		if (!node)
1160 			return;
1162 
1163 		spin_lock_irqsave(&node->interrupt_lock, flags);
1164 
1165 		if (node->interrupts_active
1166 		    && interrupt_is_wanted(node, ih_ring_entry,
1167 					   patched_ihre, &is_patched)
1168 		    && enqueue_ih_ring_entry(node,
1169 					     is_patched ? patched_ihre : ih_ring_entry)) {
1170 			queue_work(node->kfd->ih_wq, &node->interrupt_work);
1171 			spin_unlock_irqrestore(&node->interrupt_lock, flags);
1172 			return;
1173 		}
1174 		spin_unlock_irqrestore(&node->interrupt_lock, flags);
1175 	}
1177 }
1178 
1179 int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
1180 {
1181 	struct kfd_process *p;
1182 	int r;
1183 
1184 	/* Because we are called from arbitrary context (workqueue) as opposed
1185 	 * to process context, kfd_process could attempt to exit while we are
1186 	 * running so the lookup function increments the process ref count.
1187 	 */
1188 	p = kfd_lookup_process_by_mm(mm);
1189 	if (!p)
1190 		return -ESRCH;
1191 
1192 	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
1193 	r = kfd_process_evict_queues(p, trigger);
1194 
1195 	kfd_unref_process(p);
1196 	return r;
1197 }
1198 
1199 int kgd2kfd_resume_mm(struct mm_struct *mm)
1200 {
1201 	struct kfd_process *p;
1202 	int r;
1203 
1204 	/* Because we are called from arbitrary context (workqueue) as opposed
1205 	 * to process context, kfd_process could attempt to exit while we are
1206 	 * running so the lookup function increments the process ref count.
1207 	 */
1208 	p = kfd_lookup_process_by_mm(mm);
1209 	if (!p)
1210 		return -ESRCH;
1211 
1212 	r = kfd_process_restore_queues(p);
1213 
1214 	kfd_unref_process(p);
1215 	return r;
1216 }
1217 
1218 /**
1219  * kgd2kfd_schedule_evict_and_restore_process - Schedule work that will
1220  *   prepare for safe eviction of KFD BOs that belong to the specified
1221  *   process.
1222  *
1223  * @mm: mm_struct that identifies a group of KFD processes
1224  * @context_id: id that identifies a specific KFD context in the above KFD process group
1225  * @fence: eviction fence attached to KFD process BOs
1226  */
1227 int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
1228 					       u16 context_id, struct dma_fence *fence)
1229 {
1230 	struct kfd_process *p;
1231 	unsigned long active_time;
1232 	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
1233 
1234 	if (!fence)
1235 		return -EINVAL;
1236 
1237 	if (dma_fence_is_signaled(fence))
1238 		return 0;
1239 
1240 	p = kfd_lookup_process_by_id(mm, context_id);
1241 	if (!p)
1242 		return -ENODEV;
1243 
1244 	if (fence->seqno == p->last_eviction_seqno)
1245 		goto out;
1246 
1247 	p->last_eviction_seqno = fence->seqno;
1248 
1249 	/* Avoid KFD process starvation. Wait for at least
1250 	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
1251 	 */
1252 	active_time = get_jiffies_64() - p->last_restore_timestamp;
1253 	if (delay_jiffies > active_time)
1254 		delay_jiffies -= active_time;
1255 	else
1256 		delay_jiffies = 0;
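
	/*
	 * For example (illustrative numbers): if PROCESS_ACTIVE_TIME_MS maps
	 * to 10 jiffies and the process was restored 7 jiffies ago, eviction
	 * is delayed by the remaining 3 jiffies; a process already active
	 * longer than the minimum is evicted without delay.
	 */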
1257 
1258 	/* During process initialization eviction_work.dwork is initialized
1259 	 * to kfd_evict_bo_worker
1260 	 */
1261 	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
1262 	     p->lead_thread->pid, delay_jiffies);
1263 	schedule_delayed_work(&p->eviction_work, delay_jiffies);
1264 out:
1265 	kfd_unref_process(p);
1266 	return 0;
1267 }
1268 
1269 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
1270 				unsigned int chunk_size)
1271 {
1272 	if (WARN_ON(buf_size < chunk_size))
1273 		return -EINVAL;
1274 	if (WARN_ON(buf_size == 0))
1275 		return -EINVAL;
1276 	if (WARN_ON(chunk_size == 0))
1277 		return -EINVAL;
1278 
1279 	kfd->gtt_sa_chunk_size = chunk_size;
1280 	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
1281 
1282 	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
1283 					   GFP_KERNEL);
1284 	if (!kfd->gtt_sa_bitmap)
1285 		return -ENOMEM;
1286 
1287 	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
1288 			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
1289 
1290 	mutex_init(&kfd->gtt_sa_lock);
1291 
1292 	return 0;
1293 }
1294 
1295 static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
1296 {
1297 	mutex_destroy(&kfd->gtt_sa_lock);
1298 	bitmap_free(kfd->gtt_sa_bitmap);
1299 }
1300 
1301 static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
1302 						unsigned int bit_num,
1303 						unsigned int chunk_size)
1304 {
1305 	return start_addr + bit_num * chunk_size;
1306 }
1307 
1308 static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
1309 						unsigned int bit_num,
1310 						unsigned int chunk_size)
1311 {
1312 	return (uint32_t *)((uintptr_t)start_addr + bit_num * chunk_size);
1313 }
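
/*
 * Worked example (illustrative): with the 512-byte chunk size chosen at
 * init time, chunk 3 of a region starting at GPU address 0x1000 lives at
 * 0x1000 + 3 * 512 = 0x1600, and a 1200-byte allocation needs three
 * contiguous free chunks in the bitmap (range_start .. range_start + 2).
 */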
1314 
1315 int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
1316 			struct kfd_mem_obj **mem_obj)
1317 {
1318 	unsigned int found, start_search, cur_size;
1319 	struct kfd_dev *kfd = node->kfd;
1320 
1321 	if (size == 0)
1322 		return -EINVAL;
1323 
1324 	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
1325 		return -ENOMEM;
1326 
1327 	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
1328 	if (!(*mem_obj))
1329 		return -ENOMEM;
1330 
1331 	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
1332 
1333 	start_search = 0;
1334 
1335 	mutex_lock(&kfd->gtt_sa_lock);
1336 
1337 kfd_gtt_restart_search:
1338 	/* Find the first chunk that is free */
1339 	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
1340 					kfd->gtt_sa_num_of_chunks,
1341 					start_search);
1342 
1343 	pr_debug("Found = %d\n", found);
1344 
1345 	/* If there wasn't any free chunk, bail out */
1346 	if (found == kfd->gtt_sa_num_of_chunks)
1347 		goto kfd_gtt_no_free_chunk;
1348 
1349 	/* Update fields of mem_obj */
1350 	(*mem_obj)->range_start = found;
1351 	(*mem_obj)->range_end = found;
1352 	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
1353 					kfd->gtt_start_gpu_addr,
1354 					found,
1355 					kfd->gtt_sa_chunk_size);
1356 	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
1357 					kfd->gtt_start_cpu_ptr,
1358 					found,
1359 					kfd->gtt_sa_chunk_size);
1360 
1361 	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
1362 			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
1363 
1364 	/* If we need only one chunk, mark it as allocated and get out */
1365 	if (size <= kfd->gtt_sa_chunk_size) {
1366 		pr_debug("Single bit\n");
1367 		__set_bit(found, kfd->gtt_sa_bitmap);
1368 		goto kfd_gtt_out;
1369 	}
1370 
1371 	/* Otherwise, try to see if we have enough contiguous chunks */
1372 	cur_size = size - kfd->gtt_sa_chunk_size;
1373 	do {
1374 		(*mem_obj)->range_end =
1375 			find_next_zero_bit(kfd->gtt_sa_bitmap,
1376 					kfd->gtt_sa_num_of_chunks, ++found);
1377 		/*
1378 		 * If the next free chunk is not contiguous then we need to
1379 		 * restart our search from the last free chunk we found (which
1380 		 * wasn't contiguous to the previous ones).
1381 		 */
1382 		if ((*mem_obj)->range_end != found) {
1383 			start_search = found;
1384 			goto kfd_gtt_restart_search;
1385 		}
1386 
1387 		/*
1388 		 * If we reached end of buffer, bail out with error
1389 		 */
1390 		if (found == kfd->gtt_sa_num_of_chunks)
1391 			goto kfd_gtt_no_free_chunk;
1392 
1393 		/* Check if we don't need another chunk */
1394 		if (cur_size <= kfd->gtt_sa_chunk_size)
1395 			cur_size = 0;
1396 		else
1397 			cur_size -= kfd->gtt_sa_chunk_size;
1398 
1399 	} while (cur_size > 0);
1400 
1401 	pr_debug("range_start = %d, range_end = %d\n",
1402 		(*mem_obj)->range_start, (*mem_obj)->range_end);
1403 
1404 	/* Mark the chunks as allocated */
1405 	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
1406 		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);
1407 
1408 kfd_gtt_out:
1409 	mutex_unlock(&kfd->gtt_sa_lock);
1410 	return 0;
1411 
1412 kfd_gtt_no_free_chunk:
1413 	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
1414 	mutex_unlock(&kfd->gtt_sa_lock);
1415 	kfree(*mem_obj);
1416 	return -ENOMEM;
1417 }
1418 
1419 int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
1420 {
1421 	struct kfd_dev *kfd = node->kfd;
1422 
1423 	/* Act like kfree when trying to free a NULL object */
1424 	if (!mem_obj)
1425 		return 0;
1426 
1427 	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
1428 			mem_obj, mem_obj->range_start, mem_obj->range_end);
1429 
1430 	mutex_lock(&kfd->gtt_sa_lock);
1431 
1432 	/* Mark the chunks as free */
1433 	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
1434 		     mem_obj->range_end - mem_obj->range_start + 1);
1435 
1436 	mutex_unlock(&kfd->gtt_sa_lock);
1437 
1438 	kfree(mem_obj);
1439 	return 0;
1440 }
1441 
1442 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
1443 {
1444 	/*
1445 	 * TODO: Currently update SRAM ECC flag for first node.
1446 	 * This needs to be updated later when we can
1447 	 * identify SRAM ECC error on other nodes also.
1448 	 */
1449 	if (kfd)
1450 		atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
1451 }
1452 
1453 void kfd_inc_compute_active(struct kfd_node *node)
1454 {
1455 	if (atomic_inc_return(&node->kfd->compute_profile) == 1)
1456 		amdgpu_amdkfd_set_compute_idle(node->adev, false);
1457 }
1458 
1459 void kfd_dec_compute_active(struct kfd_node *node)
1460 {
1461 	int count = atomic_dec_return(&node->kfd->compute_profile);
1462 
1463 	if (count == 0)
1464 		amdgpu_amdkfd_set_compute_idle(node->adev, true);
1465 	WARN_ONCE(count < 0, "Compute profile ref. count error");
1466 }
1467 
1468 static bool kfd_compute_active(struct kfd_node *node)
1469 {
1470 	if (atomic_read(&node->kfd->compute_profile))
1471 		return true;
1472 	return false;
1473 }
1474 
1475 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
1476 {
1477 	/*
1478 	 * TODO: For now, raise the throttling event only on first node.
1479 	 * This will need to change after we are able to determine
1480 	 * which node raised the throttling event.
1481 	 */
1482 	if (kfd && kfd->init_complete)
1483 		kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
1484 							throttle_bitmask);
1485 }
1486 
1487 /* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines
1488  * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
1489  * When the device has more than two engines, we reserve two for PCIe to enable
1490  * full-duplex and the rest are used as XGMI.
1491  */
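/*
 * For example (illustrative): a single-node device with 8 SDMA instances
 * and xGMI support reports 2 PCIe-optimized engines and 8 - 2 = 6 XGMI
 * engines; without xGMI support all 8 are PCIe. On partitioned devices
 * both counts are divided by the number of KFD nodes.
 */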
1492 unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
1493 {
1494 	/* If XGMI is not supported, all SDMA engines are PCIe */
1495 	if (!node->adev->gmc.xgmi.supported)
1496 		return node->adev->sdma.num_instances / (int)node->kfd->num_nodes;
1497 
1498 	return min(node->adev->sdma.num_instances / (int)node->kfd->num_nodes, 2);
1499 }
1500 
1501 unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
1502 {
1503 	/* After reserved for PCIe, the rest of engines are XGMI */
1504 	return node->adev->sdma.num_instances / (int)node->kfd->num_nodes -
1505 		kfd_get_num_sdma_engines(node);
1506 }
1507 
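/*
 * Lock the KFD device ahead of a mode switch if it is idle: returns -EBUSY
 * if any process still references this device or if it is already locked
 * for reset/suspend. A successful call increments kfd_dev_lock and must be
 * paired with kgd2kfd_unlock_kfd().
 */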
1508 int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
1509 {
1510 	struct kfd_process *p;
1511 	int r = 0, temp, idx;
1512 
1513 	mutex_lock(&kfd_processes_mutex);
1514 
1515 	/* kfd_processes_count is per kfd_dev; if any process still holds a
1516 	 * reference to this device, return -EBUSY without further checks.
1517 	 */
1518 	if (atomic_read(&kfd->kfd_processes_count)) {
1519 		pr_debug("process_wq_release not finished\n");
1520 		r = -EBUSY;
1521 		goto out;
1522 	}
1523 
1524 	if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
1525 		goto out;
1526 
1527 	/* Fail during system reset/resume or while the kfd device is partition switching. */
1528 	if (kfd_is_locked(kfd)) {
1529 		r = -EBUSY;
1530 		goto out;
1531 	}
1532 
1533 	/*
1534 	 * Ensure all running processes are cgroup-excluded from the device before
1535 	 * the mode switch, i.e. no pdd was created for this socket by any process.
1536 	 */
1537 	idx = srcu_read_lock(&kfd_processes_srcu);
1538 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1539 		int i;
1540 
1541 		for (i = 0; i < p->n_pdds; i++) {
1542 			if (p->pdds[i]->dev->kfd != kfd)
1543 				continue;
1544 
1545 			r = -EBUSY;
1546 			goto proc_check_unlock;
1547 		}
1548 	}
1549 
1550 proc_check_unlock:
1551 	srcu_read_unlock(&kfd_processes_srcu, idx);
1552 out:
1553 	if (!r)
1554 		++kfd->kfd_dev_lock;
1555 	mutex_unlock(&kfd_processes_mutex);
1556 
1557 	return r;
1558 }
1559 
1560 void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
1561 {
1562 	mutex_lock(&kfd_processes_mutex);
1563 	--kfd->kfd_dev_lock;
1564 	mutex_unlock(&kfd_processes_mutex);
1565 }
1566 
1567 int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
1568 {
1569 	struct kfd_node *node;
1570 	int ret;
1571 
1572 	if (!kfd->init_complete)
1573 		return 0;
1574 
1575 	if (node_id >= kfd->num_nodes) {
1576 		dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
1577 			 node_id, kfd->num_nodes - 1);
1578 		return -EINVAL;
1579 	}
1580 	node = kfd->nodes[node_id];
1581 
1582 	ret = node->dqm->ops.unhalt(node->dqm);
1583 	if (ret)
1584 		dev_err(kfd_device, "Error in starting scheduler\n");
1585 
1586 	return ret;
1587 }
1588 
1589 int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
1590 {
1591 	struct kfd_node *node;
1592 	int i, r;
1593 
1594 	if (!kfd->init_complete)
1595 		return 0;
1596 
1597 	for (i = 0; i < kfd->num_nodes; i++) {
1598 		node = kfd->nodes[i];
1599 		r = node->dqm->ops.unhalt(node->dqm);
1600 		if (r) {
1601 			dev_err(kfd_device, "Error in starting scheduler\n");
1602 			return r;
1603 		}
1604 	}
1605 	return 0;
1606 }
1607 
1608 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
1609 {
1610 	struct kfd_node *node;
1611 
1612 	if (!kfd->init_complete)
1613 		return 0;
1614 
1615 	if (node_id >= kfd->num_nodes) {
1616 		dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
1617 			 node_id, kfd->num_nodes - 1);
1618 		return -EINVAL;
1619 	}
1620 
1621 	node = kfd->nodes[node_id];
1622 	return node->dqm->ops.halt(node->dqm);
1623 }
1624 
1625 int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
1626 {
1627 	struct kfd_node *node;
1628 	int i, r;
1629 
1630 	if (!kfd->init_complete)
1631 		return 0;
1632 
1633 	for (i = 0; i < kfd->num_nodes; i++) {
1634 		node = kfd->nodes[i];
1635 		r = node->dqm->ops.halt(node->dqm);
1636 		if (r)
1637 			return r;
1638 	}
1639 	return 0;
1640 }
1641 
1642 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
1643 {
1644 	struct kfd_node *node;
1645 
1646 	if (!kfd->init_complete)
1647 		return false;
1648 
1649 	if (node_id >= kfd->num_nodes) {
1650 		dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
1651 			 node_id, kfd->num_nodes - 1);
1652 		return false;
1653 	}
1654 
1655 	node = kfd->nodes[node_id];
1656 
1657 	return kfd_compute_active(node);
1658 }
1659 
1660 /**
1661  * kgd2kfd_vmfault_fast_path() - KFD vm page fault interrupt handling fast path for gmc v9
1662  * @adev: amdgpu device
1663  * @entry: vm fault interrupt vector
1664  * @retry_fault: if this is retry fault
1665  *
1666  * retry fault -
1667  *    with CAM enabled, adev primary ring
1668  *                           |  gmc_v9_0_process_interrupt()
1669  *                      adev soft_ring
1670  *                           |  gmc_v9_0_process_interrupt() worker failed to recover page fault
1671  *                      KFD node ih_fifo
1672  *                           |  KFD interrupt_wq worker
1673  *                      kfd_signal_vm_fault_event
1674  *
1675  *    without CAM,      adev primary ring1
1676  *                           |  gmc_v9_0_process_interrupt worker failed to recover page fault
1677  *                      KFD node ih_fifo
1678  *                           |  KFD interrupt_wq worker
1679  *                      kfd_signal_vm_fault_event
1680  *
1681  * no-retry fault -
1682  *                      adev primary ring
1683  *                           |  gmc_v9_0_process_interrupt()
1684  *                      KFD node ih_fifo
1685  *                           |  KFD interrupt_wq worker
1686  *                      kfd_signal_vm_fault_event
1687  *
1688  * fast path - After kfd_signal_vm_fault_event, gmc_v9_0_process_interrupt drops page faults
1689  *            from the same process and does not copy the interrupt to the KFD node ih_fifo.
1690  *            With the gdb debugger enabled, retry faults must be converted to no-retry
1691  *            faults for the debugger, so the fast path cannot be used.
1692  *
1693  * Return:
1694  *   true - use the fast path to handle this fault
1695  *   false - use normal path to handle it
1696  */
1697 bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
1698 			       bool retry_fault)
1699 {
1700 	struct kfd_process *p;
1701 	u32 cam_index;
1702 	u32 src_data_idx;
1703 
1704 	src_data_idx = (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) ?
1705 			3 : 2;
1706 
1707 	if (entry->ih == &adev->irq.ih_soft || entry->ih == &adev->irq.ih1) {
1708 		p = kfd_lookup_process_by_pasid(entry->pasid, NULL);
1709 		if (!p)
1710 			return true;
1711 
1712 		if (p->gpu_page_fault && !p->debug_trap_enabled) {
1713 			if (retry_fault && adev->irq.retry_cam_enabled) {
1714 				cam_index = entry->src_data[src_data_idx] & 0x3ff;
1715 
1716 				WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
1717 			}
1718 
1719 			kfd_unref_process(p);
1720 			return true;
1721 		}
1722 
1723 		/*
1724 		 * This is the first page fault; set the flag and then signal user space.
1725 		 */
1726 		p->gpu_page_fault = true;
1727 		kfd_unref_process(p);
1728 	}
1729 	return false;
1730 }
1731 
1732 #if defined(CONFIG_DEBUG_FS)
1733 
1734 /* This function sends a packet to the HIQ to hang the HWS,
1735  * which will trigger a GPU reset and bring the HWS back to a normal state.
1736  */
1737 int kfd_debugfs_hang_hws(struct kfd_node *dev)
1738 {
1739 	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
1740 		pr_err("HWS is not enabled\n");
1741 		return -EINVAL;
1742 	}
1743 
1744 	if (dev->kfd->shared_resources.enable_mes) {
1745 		dev_err(dev->adev->dev, "Inducing MES hang is not supported\n");
1746 		return -EINVAL;
1747 	}
1748 
1749 	return dqm_debugfs_hang_hws(dev->dqm);
1750 }
1751 
1752 #endif
1753