1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/bsearch.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include "kfd_priv.h"
28 #include "kfd_device_queue_manager.h"
29 #include "kfd_pm4_headers_vi.h"
30 #include "kfd_pm4_headers_aldebaran.h"
31 #include "cwsr_trap_handler.h"
32 #include "amdgpu_amdkfd.h"
33 #include "kfd_smi_events.h"
34 #include "kfd_svm.h"
35 #include "kfd_migrate.h"
36 #include "amdgpu.h"
37 #include "amdgpu_xcp.h"
38
39 #define MQD_SIZE_ALIGNED 768
40
41 /*
42 * kfd_locked is used to lock the kfd driver during suspend or reset.
43 * Once locked, the kfd driver will stop any further GPU execution.
44 * Create process (open) will return -EAGAIN.
45 */
46 static int kfd_locked;
47
48 #ifdef CONFIG_DRM_AMDGPU_CIK
49 extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
50 #endif
51 extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
52 extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
53 extern const struct kfd2kgd_calls arcturus_kfd2kgd;
54 extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
55 extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
56 extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
57 extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
58 extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
59 extern const struct kfd2kgd_calls gfx_v12_kfd2kgd;
60
61 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
62 unsigned int chunk_size);
63 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
64
65 static int kfd_resume(struct kfd_node *kfd);
66
67 static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
68 {
69 uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0);
70
71 switch (sdma_version) {
72 case IP_VERSION(4, 0, 0):/* VEGA10 */
73 case IP_VERSION(4, 0, 1):/* VEGA12 */
74 case IP_VERSION(4, 1, 0):/* RAVEN */
75 case IP_VERSION(4, 1, 1):/* RAVEN */
76 case IP_VERSION(4, 1, 2):/* RENOIR */
77 case IP_VERSION(5, 2, 1):/* VANGOGH */
78 case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
79 case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
80 case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
81 kfd->device_info.num_sdma_queues_per_engine = 2;
82 break;
83 case IP_VERSION(4, 2, 0):/* VEGA20 */
84 case IP_VERSION(4, 2, 2):/* ARCTURUS */
85 case IP_VERSION(4, 4, 0):/* ALDEBARAN */
86 case IP_VERSION(4, 4, 2):
87 case IP_VERSION(4, 4, 5):
88 case IP_VERSION(4, 4, 4):
89 case IP_VERSION(5, 0, 0):/* NAVI10 */
90 case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
91 case IP_VERSION(5, 0, 2):/* NAVI14 */
92 case IP_VERSION(5, 0, 5):/* NAVI12 */
93 case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
94 case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
95 case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
96 case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
97 case IP_VERSION(6, 0, 0):
98 case IP_VERSION(6, 0, 1):
99 case IP_VERSION(6, 0, 2):
100 case IP_VERSION(6, 0, 3):
101 case IP_VERSION(6, 1, 0):
102 case IP_VERSION(6, 1, 1):
103 case IP_VERSION(6, 1, 2):
104 case IP_VERSION(7, 0, 0):
105 case IP_VERSION(7, 0, 1):
106 kfd->device_info.num_sdma_queues_per_engine = 8;
107 break;
108 default:
109 dev_warn(kfd_device,
110 "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
111 sdma_version);
112 kfd->device_info.num_sdma_queues_per_engine = 8;
113 }
114
115 bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
116
117 switch (sdma_version) {
118 case IP_VERSION(6, 0, 0):
119 case IP_VERSION(6, 0, 1):
120 case IP_VERSION(6, 0, 2):
121 case IP_VERSION(6, 0, 3):
122 case IP_VERSION(6, 1, 0):
123 case IP_VERSION(6, 1, 1):
124 case IP_VERSION(6, 1, 2):
125 case IP_VERSION(7, 0, 0):
126 case IP_VERSION(7, 0, 1):
127 /* Reserve 1 for paging and 1 for gfx */
128 kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
129 /* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
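/* e.g. with 2 SDMA instances and 2 reserved queues per engine, bits 0-3 are set:
 * engine-0 queue-0, engine-1 queue-0, engine-0 queue-1, engine-1 queue-1.
 */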
130 bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0,
131 kfd->adev->sdma.num_instances *
132 kfd->device_info.num_reserved_sdma_queues_per_engine);
133 break;
134 default:
135 break;
136 }
137 }
138
139 static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
140 {
141 uint32_t gc_version = KFD_GC_VERSION(kfd);
142
143 switch (gc_version) {
144 case IP_VERSION(9, 0, 1): /* VEGA10 */
145 case IP_VERSION(9, 1, 0): /* RAVEN */
146 case IP_VERSION(9, 2, 1): /* VEGA12 */
147 case IP_VERSION(9, 2, 2): /* RAVEN */
148 case IP_VERSION(9, 3, 0): /* RENOIR */
149 case IP_VERSION(9, 4, 0): /* VEGA20 */
150 case IP_VERSION(9, 4, 1): /* ARCTURUS */
151 case IP_VERSION(9, 4, 2): /* ALDEBARAN */
152 kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
153 break;
154 case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
155 case IP_VERSION(9, 4, 4): /* GC 9.4.4 */
156 case IP_VERSION(9, 5, 0): /* GC 9.5.0 */
157 kfd->device_info.event_interrupt_class =
158 &event_interrupt_class_v9_4_3;
159 break;
160 case IP_VERSION(10, 3, 1): /* VANGOGH */
161 case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
162 case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
163 case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
164 case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
165 case IP_VERSION(10, 1, 4):
166 case IP_VERSION(10, 1, 10): /* NAVI10 */
167 case IP_VERSION(10, 1, 2): /* NAVI12 */
168 case IP_VERSION(10, 1, 1): /* NAVI14 */
169 case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
170 case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
171 case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
172 case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
173 kfd->device_info.event_interrupt_class = &event_interrupt_class_v10;
174 break;
175 case IP_VERSION(11, 0, 0):
176 case IP_VERSION(11, 0, 1):
177 case IP_VERSION(11, 0, 2):
178 case IP_VERSION(11, 0, 3):
179 case IP_VERSION(11, 0, 4):
180 case IP_VERSION(11, 5, 0):
181 case IP_VERSION(11, 5, 1):
182 case IP_VERSION(11, 5, 2):
183 kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
184 break;
185 case IP_VERSION(12, 0, 0):
186 case IP_VERSION(12, 0, 1):
187 /* GFX12_TODO: Change to v12 version. */
188 kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
189 break;
190 default:
191 dev_warn(kfd_device, "v9 event interrupt handler is set due to "
192 "mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
193 kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
194 }
195 }
196
197 static void kfd_device_info_init(struct kfd_dev *kfd,
198 bool vf, uint32_t gfx_target_version)
199 {
200 uint32_t gc_version = KFD_GC_VERSION(kfd);
201 uint32_t asic_type = kfd->adev->asic_type;
202
203 kfd->device_info.max_pasid_bits = 16;
204 kfd->device_info.max_no_of_hqd = 24;
205 kfd->device_info.num_of_watch_points = 4;
206 kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
207 kfd->device_info.gfx_target_version = gfx_target_version;
208
209 if (KFD_IS_SOC15(kfd)) {
210 kfd->device_info.doorbell_size = 8;
211 kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
212 kfd->device_info.supports_cwsr = true;
213
214 kfd_device_info_set_sdma_info(kfd);
215
216 kfd_device_info_set_event_interrupt_class(kfd);
217
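/* no_atomic_fw_version is the first MEC firmware release that can run without
 * PCIe atomics; with older firmware (or when it is left at 0), PCIe atomic
 * support is mandatory and probing fails without it (see kgd2kfd_device_init).
 */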
218 if (gc_version < IP_VERSION(11, 0, 0)) {
219 /* Navi2x+, Navi1x+ */
220 if (gc_version == IP_VERSION(10, 3, 6))
221 kfd->device_info.no_atomic_fw_version = 14;
222 else if (gc_version == IP_VERSION(10, 3, 7))
223 kfd->device_info.no_atomic_fw_version = 3;
224 else if (gc_version >= IP_VERSION(10, 3, 0))
225 kfd->device_info.no_atomic_fw_version = 92;
226 else if (gc_version >= IP_VERSION(10, 1, 1))
227 kfd->device_info.no_atomic_fw_version = 145;
228
229 /* Navi1x+ */
230 if (gc_version >= IP_VERSION(10, 1, 1))
231 kfd->device_info.needs_pci_atomics = true;
232 } else if (gc_version < IP_VERSION(12, 0, 0)) {
233 /*
234 * PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
235 * MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
236 * PCIe atomics support.
237 */
238 kfd->device_info.needs_pci_atomics = true;
239 kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
240 } else if (gc_version < IP_VERSION(13, 0, 0)) {
241 kfd->device_info.needs_pci_atomics = true;
242 kfd->device_info.no_atomic_fw_version = 2090;
243 } else {
244 kfd->device_info.needs_pci_atomics = true;
245 }
246 } else {
247 kfd->device_info.doorbell_size = 4;
248 kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
249 kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
250 kfd->device_info.num_sdma_queues_per_engine = 2;
251
252 if (asic_type != CHIP_KAVERI &&
253 asic_type != CHIP_HAWAII &&
254 asic_type != CHIP_TONGA)
255 kfd->device_info.supports_cwsr = true;
256
257 if (asic_type != CHIP_HAWAII && !vf)
258 kfd->device_info.needs_pci_atomics = true;
259 }
260 }
261
262 struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
263 {
264 struct kfd_dev *kfd = NULL;
265 const struct kfd2kgd_calls *f2g = NULL;
266 uint32_t gfx_target_version = 0;
267
268 switch (adev->asic_type) {
269 #ifdef CONFIG_DRM_AMDGPU_CIK
270 case CHIP_KAVERI:
271 gfx_target_version = 70000;
272 if (!vf)
273 f2g = &gfx_v7_kfd2kgd;
274 break;
275 #endif
276 case CHIP_CARRIZO:
277 gfx_target_version = 80001;
278 if (!vf)
279 f2g = &gfx_v8_kfd2kgd;
280 break;
281 #ifdef CONFIG_DRM_AMDGPU_CIK
282 case CHIP_HAWAII:
283 gfx_target_version = 70001;
284 if (!amdgpu_exp_hw_support)
285 pr_info(
286 "KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
287 );
288 else if (!vf)
289 f2g = &gfx_v7_kfd2kgd;
290 break;
291 #endif
292 case CHIP_TONGA:
293 gfx_target_version = 80002;
294 if (!vf)
295 f2g = &gfx_v8_kfd2kgd;
296 break;
297 case CHIP_FIJI:
298 case CHIP_POLARIS10:
299 gfx_target_version = 80003;
300 f2g = &gfx_v8_kfd2kgd;
301 break;
302 case CHIP_POLARIS11:
303 case CHIP_POLARIS12:
304 case CHIP_VEGAM:
305 gfx_target_version = 80003;
306 if (!vf)
307 f2g = &gfx_v8_kfd2kgd;
308 break;
309 default:
310 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
311 /* Vega 10 */
312 case IP_VERSION(9, 0, 1):
313 gfx_target_version = 90000;
314 f2g = &gfx_v9_kfd2kgd;
315 break;
316 /* Raven */
317 case IP_VERSION(9, 1, 0):
318 case IP_VERSION(9, 2, 2):
319 gfx_target_version = 90002;
320 if (!vf)
321 f2g = &gfx_v9_kfd2kgd;
322 break;
323 /* Vega12 */
324 case IP_VERSION(9, 2, 1):
325 gfx_target_version = 90004;
326 if (!vf)
327 f2g = &gfx_v9_kfd2kgd;
328 break;
329 /* Renoir */
330 case IP_VERSION(9, 3, 0):
331 gfx_target_version = 90012;
332 if (!vf)
333 f2g = &gfx_v9_kfd2kgd;
334 break;
335 /* Vega20 */
336 case IP_VERSION(9, 4, 0):
337 gfx_target_version = 90006;
338 if (!vf)
339 f2g = &gfx_v9_kfd2kgd;
340 break;
341 /* Arcturus */
342 case IP_VERSION(9, 4, 1):
343 gfx_target_version = 90008;
344 f2g = &arcturus_kfd2kgd;
345 break;
346 /* Aldebaran */
347 case IP_VERSION(9, 4, 2):
348 gfx_target_version = 90010;
349 f2g = &aldebaran_kfd2kgd;
350 break;
351 case IP_VERSION(9, 4, 3):
352 gfx_target_version = adev->rev_id >= 1 ? 90402
353 : adev->flags & AMD_IS_APU ? 90400
354 : 90401;
355 f2g = &gc_9_4_3_kfd2kgd;
356 break;
357 case IP_VERSION(9, 4, 4):
358 gfx_target_version = 90402;
359 f2g = &gc_9_4_3_kfd2kgd;
360 break;
361 case IP_VERSION(9, 5, 0):
362 gfx_target_version = 90500;
363 f2g = &gc_9_4_3_kfd2kgd;
364 break;
365 /* Navi10 */
366 case IP_VERSION(10, 1, 10):
367 gfx_target_version = 100100;
368 if (!vf)
369 f2g = &gfx_v10_kfd2kgd;
370 break;
371 /* Navi12 */
372 case IP_VERSION(10, 1, 2):
373 gfx_target_version = 100101;
374 f2g = &gfx_v10_kfd2kgd;
375 break;
376 /* Navi14 */
377 case IP_VERSION(10, 1, 1):
378 gfx_target_version = 100102;
379 if (!vf)
380 f2g = &gfx_v10_kfd2kgd;
381 break;
382 /* Cyan Skillfish */
383 case IP_VERSION(10, 1, 3):
384 case IP_VERSION(10, 1, 4):
385 gfx_target_version = 100103;
386 if (!vf)
387 f2g = &gfx_v10_kfd2kgd;
388 break;
389 /* Sienna Cichlid */
390 case IP_VERSION(10, 3, 0):
391 gfx_target_version = 100300;
392 f2g = &gfx_v10_3_kfd2kgd;
393 break;
394 /* Navy Flounder */
395 case IP_VERSION(10, 3, 2):
396 gfx_target_version = 100301;
397 f2g = &gfx_v10_3_kfd2kgd;
398 break;
399 /* Van Gogh */
400 case IP_VERSION(10, 3, 1):
401 gfx_target_version = 100303;
402 if (!vf)
403 f2g = &gfx_v10_3_kfd2kgd;
404 break;
405 /* Dimgrey Cavefish */
406 case IP_VERSION(10, 3, 4):
407 gfx_target_version = 100302;
408 f2g = &gfx_v10_3_kfd2kgd;
409 break;
410 /* Beige Goby */
411 case IP_VERSION(10, 3, 5):
412 gfx_target_version = 100304;
413 f2g = &gfx_v10_3_kfd2kgd;
414 break;
415 /* Yellow Carp */
416 case IP_VERSION(10, 3, 3):
417 gfx_target_version = 100305;
418 if (!vf)
419 f2g = &gfx_v10_3_kfd2kgd;
420 break;
421 case IP_VERSION(10, 3, 6):
422 case IP_VERSION(10, 3, 7):
423 gfx_target_version = 100306;
424 if (!vf)
425 f2g = &gfx_v10_3_kfd2kgd;
426 break;
427 case IP_VERSION(11, 0, 0):
428 gfx_target_version = 110000;
429 f2g = &gfx_v11_kfd2kgd;
430 break;
431 case IP_VERSION(11, 0, 1):
432 case IP_VERSION(11, 0, 4):
433 gfx_target_version = 110003;
434 f2g = &gfx_v11_kfd2kgd;
435 break;
436 case IP_VERSION(11, 0, 2):
437 gfx_target_version = 110002;
438 f2g = &gfx_v11_kfd2kgd;
439 break;
440 case IP_VERSION(11, 0, 3):
441 /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
442 gfx_target_version = 110001;
443 f2g = &gfx_v11_kfd2kgd;
444 break;
445 case IP_VERSION(11, 5, 0):
446 gfx_target_version = 110500;
447 f2g = &gfx_v11_kfd2kgd;
448 break;
449 case IP_VERSION(11, 5, 1):
450 gfx_target_version = 110501;
451 f2g = &gfx_v11_kfd2kgd;
452 break;
453 case IP_VERSION(11, 5, 2):
454 gfx_target_version = 110502;
455 f2g = &gfx_v11_kfd2kgd;
456 break;
457 case IP_VERSION(12, 0, 0):
458 gfx_target_version = 120000;
459 f2g = &gfx_v12_kfd2kgd;
460 break;
461 case IP_VERSION(12, 0, 1):
462 gfx_target_version = 120001;
463 f2g = &gfx_v12_kfd2kgd;
464 break;
465 default:
466 break;
467 }
468 break;
469 }
470
471 if (!f2g) {
472 if (amdgpu_ip_version(adev, GC_HWIP, 0))
473 dev_info(kfd_device,
474 "GC IP %06x %s not supported in kfd\n",
475 amdgpu_ip_version(adev, GC_HWIP, 0),
476 vf ? "VF" : "");
477 else
478 dev_info(kfd_device, "%s %s not supported in kfd\n",
479 amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
480 return NULL;
481 }
482
483 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
484 if (!kfd)
485 return NULL;
486
487 kfd->adev = adev;
488 kfd_device_info_init(kfd, vf, gfx_target_version);
489 kfd->init_complete = false;
490 kfd->kfd2kgd = f2g;
491 atomic_set(&kfd->compute_profile, 0);
492
493 mutex_init(&kfd->doorbell_mutex);
494
495 ida_init(&kfd->doorbell_ida);
496
497 return kfd;
498 }
499
500 static void kfd_cwsr_init(struct kfd_dev *kfd)
501 {
502 if (cwsr_enable && kfd->device_info.supports_cwsr) {
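/* Each trap handler image must fit below KFD_CWSR_TMA_OFFSET (or within a
 * single page for the variants noted below), presumably because the TMA is
 * placed at that offset in the same allocation; the BUILD_BUG_ONs enforce
 * this at compile time.
 */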
503 if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
504 BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex)
505 > KFD_CWSR_TMA_OFFSET);
506 kfd->cwsr_isa = cwsr_trap_gfx8_hex;
507 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
508 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
509 BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex)
510 > KFD_CWSR_TMA_OFFSET);
511 kfd->cwsr_isa = cwsr_trap_arcturus_hex;
512 kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
513 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
514 BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex)
515 > KFD_CWSR_TMA_OFFSET);
516 kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
517 kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
518 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
519 KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) {
520 BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex)
521 > KFD_CWSR_TMA_OFFSET);
522 kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
523 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
524 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 5, 0)) {
525 BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_5_0_hex) > PAGE_SIZE);
526 kfd->cwsr_isa = cwsr_trap_gfx9_5_0_hex;
527 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_5_0_hex);
528 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
529 BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex)
530 > KFD_CWSR_TMA_OFFSET);
531 kfd->cwsr_isa = cwsr_trap_gfx9_hex;
532 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
533 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
534 BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex)
535 > KFD_CWSR_TMA_OFFSET);
536 kfd->cwsr_isa = cwsr_trap_nv1x_hex;
537 kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
538 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
539 BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex)
540 > KFD_CWSR_TMA_OFFSET);
541 kfd->cwsr_isa = cwsr_trap_gfx10_hex;
542 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
543 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(12, 0, 0)) {
544 /* The gfx11 cwsr trap handler must fit inside a single
545 page. */
546 BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
547 kfd->cwsr_isa = cwsr_trap_gfx11_hex;
548 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
549 } else {
550 BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex)
551 > KFD_CWSR_TMA_OFFSET);
552 kfd->cwsr_isa = cwsr_trap_gfx12_hex;
553 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex);
554 }
555
556 kfd->cwsr_enabled = true;
557 }
558 }
559
560 static int kfd_gws_init(struct kfd_node *node)
561 {
562 int ret = 0;
563 struct kfd_dev *kfd = node->kfd;
564 uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
565
566 if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
567 return 0;
568
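/* Allocate GWS when the hws_gws_support module parameter forces it, or when
 * the GPU's MEC2 firmware (MES firmware for GFX11) is new enough to support
 * GWS under HWS.
 */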
569 if (hws_gws_support || (KFD_IS_SOC15(node) &&
570 ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
571 && kfd->mec2_fw_version >= 0x81b3) ||
572 (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
573 && kfd->mec2_fw_version >= 0x1b3) ||
574 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
575 && kfd->mec2_fw_version >= 0x30) ||
576 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
577 && kfd->mec2_fw_version >= 0x28) ||
578 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3) ||
579 KFD_GC_VERSION(node) == IP_VERSION(9, 4, 4)) ||
580 (KFD_GC_VERSION(node) == IP_VERSION(9, 5, 0)) ||
581 (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
582 && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
583 && kfd->mec2_fw_version >= 0x6b) ||
584 (KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
585 && KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
586 && mes_rev >= 68))))
587 ret = amdgpu_amdkfd_alloc_gws(node->adev,
588 node->adev->gds.gws_size, &node->gws);
589
590 return ret;
591 }
592
593 static void kfd_smi_init(struct kfd_node *dev)
594 {
595 INIT_LIST_HEAD(&dev->smi_clients);
596 spin_lock_init(&dev->smi_lock);
597 }
598
599 static int kfd_init_node(struct kfd_node *node)
600 {
601 int err = -1;
602
603 if (kfd_interrupt_init(node)) {
604 dev_err(kfd_device, "Error initializing interrupts\n");
605 goto kfd_interrupt_error;
606 }
607
608 node->dqm = device_queue_manager_init(node);
609 if (!node->dqm) {
610 dev_err(kfd_device, "Error initializing queue manager\n");
611 goto device_queue_manager_error;
612 }
613
614 if (kfd_gws_init(node)) {
615 dev_err(kfd_device, "Could not allocate %d gws\n",
616 node->adev->gds.gws_size);
617 goto gws_error;
618 }
619
620 if (kfd_resume(node))
621 goto kfd_resume_error;
622
623 if (kfd_topology_add_device(node)) {
624 dev_err(kfd_device, "Error adding device to topology\n");
625 goto kfd_topology_add_device_error;
626 }
627
628 kfd_smi_init(node);
629
630 return 0;
631
632 kfd_topology_add_device_error:
633 kfd_resume_error:
634 gws_error:
635 device_queue_manager_uninit(node->dqm);
636 device_queue_manager_error:
637 kfd_interrupt_exit(node);
638 kfd_interrupt_error:
639 if (node->gws)
640 amdgpu_amdkfd_free_gws(node->adev, node->gws);
641
642 /* Cleanup the node memory here */
643 kfree(node);
644 return err;
645 }
646
647 static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
648 {
649 struct kfd_node *knode;
650 unsigned int i;
651
652 /*
653 * flush_workqueue ensures that there are no outstanding
654 * work-queue items that will access interrupt_ring. New work items
655 * can't be created because we stopped interrupt handling above.
656 */
657 flush_workqueue(kfd->ih_wq);
658 destroy_workqueue(kfd->ih_wq);
659
660 for (i = 0; i < num_nodes; i++) {
661 knode = kfd->nodes[i];
662 device_queue_manager_uninit(knode->dqm);
663 kfd_interrupt_exit(knode);
664 kfd_topology_remove_device(knode);
665 if (knode->gws)
666 amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
667 kfree(knode);
668 kfd->nodes[i] = NULL;
669 }
670 }
671
672 static void kfd_setup_interrupt_bitmap(struct kfd_node *node,
673 unsigned int kfd_node_idx)
674 {
675 struct amdgpu_device *adev = node->adev;
676 uint32_t xcc_mask = node->xcc_mask;
677 uint32_t xcc, mapped_xcc;
678 /*
679 * Interrupt bitmap is setup for processing interrupts from
680 * different XCDs and AIDs.
681 * Interrupt bitmap is defined as follows:
682 * 1. Bits 0-15 - correspond to the NodeId field.
683 * Each bit corresponds to NodeId number. For example, if
684 * a KFD node has interrupt bitmap set to 0x7, then this
685 * KFD node will process interrupts with NodeId = 0, 1 and 2
686 * in the IH cookie.
687 * 2. Bits 16-31 - unused.
688 *
689 * Please note that the kfd_node_idx argument passed to this
690 * function is not related to NodeId field received in the
691 * IH cookie.
692 *
693 * In CPX mode, a KFD node will process an interrupt if:
694 * - the Node Id matches the corresponding bit set in
695 * Bits 0-15.
696 * - AND VMID reported in the interrupt lies within the
697 * VMID range of the node.
698 */
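/* Each pair of GC instances (one AID) covers a 4-bit group of NodeIds: an even
 * instance claims NodeIds 0 and 1 of its group (mask 0x3), an odd instance
 * claims NodeIds 0 and 2 (mask 0x5), shifted by 4 bits per AID.
 */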
699 for_each_inst(xcc, xcc_mask) {
700 mapped_xcc = GET_INST(GC, xcc);
701 node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
702 }
703 dev_info(kfd_device, "Node: %d, interrupt_bitmap: %x\n", kfd_node_idx,
704 node->interrupt_bitmap);
705 }
706
707 bool kgd2kfd_device_init(struct kfd_dev *kfd,
708 const struct kgd2kfd_shared_resources *gpu_resources)
709 {
710 unsigned int size, map_process_packet_size, i;
711 struct kfd_node *node;
712 uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
713 unsigned int max_proc_per_quantum;
714 int partition_mode;
715 int xcp_idx;
716
717 kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
718 KGD_ENGINE_MEC1);
719 kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
720 KGD_ENGINE_MEC2);
721 kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
722 KGD_ENGINE_SDMA1);
723 kfd->shared_resources = *gpu_resources;
724
725 kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);
726
727 if (kfd->num_nodes == 0) {
728 dev_err(kfd_device,
729 "KFD num nodes cannot be 0, num_xcc_in_node: %d\n",
730 kfd->adev->gfx.num_xcc_per_xcp);
731 goto out;
732 }
733
734 /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
735 * 32 and 64-bit requests are possible and must be
736 * supported.
737 */
738 kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
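/* Reject the device if it needs PCIe atomics, the platform cannot provide
 * them, and the MEC firmware is too old to cope without them.
 */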
739 if (!kfd->pci_atomic_requested &&
740 kfd->device_info.needs_pci_atomics &&
741 (!kfd->device_info.no_atomic_fw_version ||
742 kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
743 dev_info(kfd_device,
744 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
745 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
746 kfd->mec_fw_version,
747 kfd->device_info.no_atomic_fw_version);
748 return false;
749 }
750
751 first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
752 last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
753 vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
754
755 /* For multi-partition capable GPUs, we need special handling for VMIDs
756 * depending on partition mode.
757 * In CPX mode, the VMID range needs to be shared between XCDs.
758 * Additionally, there are 13 VMIDs (3-15) available for KFD. To
759 * divide them equally, we change starting VMID to 4 and not use
760 * VMID 3.
761 * If the VMID range changes for multi-partition capable GPUs, then
762 * this code MUST be revisited.
763 */
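/* e.g. with compute VMIDs 3-15 (13 VMIDs) and two XCDs in CPX mode,
 * vmid_num_kfd becomes 6 and first_vmid_kfd moves to 4, giving each XCD
 * six VMIDs (4-9 and 10-15).
 */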
764 if (kfd->adev->xcp_mgr) {
765 partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
766 AMDGPU_XCP_FL_LOCKED);
767 if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
768 kfd->num_nodes != 1) {
769 vmid_num_kfd /= 2;
770 first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
771 }
772 }
773
774 /* Verify module parameters regarding mapped process number*/
775 if (hws_max_conc_proc >= 0)
776 max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
777 else
778 max_proc_per_quantum = vmid_num_kfd;
779
780 /* calculate max size of mqds needed for queues */
781 size = max_num_of_queues_per_device *
782 kfd->device_info.mqd_size_aligned;
783
784 /*
785 * calculate max size of runlist packet.
786 * There can be only 2 packets at once
787 */
788 map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
789 sizeof(struct pm4_mes_map_process_aldebaran) :
790 sizeof(struct pm4_mes_map_process);
791 size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
792 max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
793 + sizeof(struct pm4_mes_runlist)) * 2;
794
795 /* Add size of HIQ & DIQ */
796 size += KFD_KERNEL_QUEUE_SIZE * 2;
797
798 /* add another 512KB for all other allocations on gart (HPD, fences) */
799 size += 512 * 1024;
800
801 if (amdgpu_amdkfd_alloc_gtt_mem(
802 kfd->adev, size, &kfd->gtt_mem,
803 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
804 false)) {
805 dev_err(kfd_device, "Could not allocate %d bytes\n", size);
806 goto alloc_gtt_mem_failure;
807 }
808
809 dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
810
811 /* Initialize GTT sa with 512 byte chunk size */
812 if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
813 dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
814 goto kfd_gtt_sa_init_error;
815 }
816
817 if (kfd_doorbell_init(kfd)) {
818 dev_err(kfd_device,
819 "Error initializing doorbell aperture\n");
820 goto kfd_doorbell_error;
821 }
822
823 if (amdgpu_use_xgmi_p2p)
824 kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
825
826 /*
827 * For multi-partition capable GPUs, the KFD abstracts all partitions
828 * within a socket as xGMI connected in the topology so assign a unique
829 * hive id per device based on the pci device location if device is in
830 * PCIe mode.
831 */
832 if (!kfd->hive_id && kfd->num_nodes > 1)
833 kfd->hive_id = pci_dev_id(kfd->adev->pdev);
834
835 kfd->noretry = kfd->adev->gmc.noretry;
836
837 kfd_cwsr_init(kfd);
838
839 dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
840 kfd->num_nodes);
841
842 /* Allocate the KFD nodes */
843 for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
844 node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
845 if (!node)
846 goto node_alloc_error;
847
848 node->node_id = i;
849 node->adev = kfd->adev;
850 node->kfd = kfd;
851 node->kfd2kgd = kfd->kfd2kgd;
852 node->vm_info.vmid_num_kfd = vmid_num_kfd;
853 node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
854 /* TODO : Check if error handling is needed */
855 if (node->xcp) {
856 amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
857 &node->xcc_mask);
858 ++xcp_idx;
859 } else {
860 node->xcc_mask =
861 (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
862 }
863
864 if (node->xcp) {
865 dev_info(kfd_device, "KFD node %d partition %d size %lldM\n",
866 node->node_id, node->xcp->mem_id,
867 KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
868 }
869
870 if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
871 kfd->num_nodes != 1) {
872 /* For multi-partition capable GPUs and CPX mode, first
873 * XCD gets VMID range 4-9 and second XCD gets VMID
874 * range 10-15.
875 */
876
877 node->vm_info.first_vmid_kfd = (i%2 == 0) ?
878 first_vmid_kfd :
879 first_vmid_kfd+vmid_num_kfd;
880 node->vm_info.last_vmid_kfd = (i%2 == 0) ?
881 last_vmid_kfd-vmid_num_kfd :
882 last_vmid_kfd;
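/* Build a mask with bits first_vmid_kfd..last_vmid_kfd set,
 * e.g. VMIDs 4-9 -> 0x3f0, VMIDs 10-15 -> 0xfc00.
 */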
883 node->compute_vmid_bitmap =
884 ((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
885 ((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
886 } else {
887 node->vm_info.first_vmid_kfd = first_vmid_kfd;
888 node->vm_info.last_vmid_kfd = last_vmid_kfd;
889 node->compute_vmid_bitmap =
890 gpu_resources->compute_vmid_bitmap;
891 }
892 node->max_proc_per_quantum = max_proc_per_quantum;
893 atomic_set(&node->sram_ecc_flag, 0);
894
895 amdgpu_amdkfd_get_local_mem_info(kfd->adev,
896 &node->local_mem_info, node->xcp);
897
898 if (kfd->adev->xcp_mgr)
899 kfd_setup_interrupt_bitmap(node, i);
900
901 /* Initialize the KFD node */
902 if (kfd_init_node(node)) {
903 dev_err(kfd_device, "Error initializing KFD node\n");
904 goto node_init_error;
905 }
906
907 spin_lock_init(&node->watch_points_lock);
908
909 kfd->nodes[i] = node;
910 }
911
912 svm_range_set_max_pages(kfd->adev);
913
914 kfd->init_complete = true;
915 dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
916 kfd->adev->pdev->device);
917
918 pr_debug("Starting kfd with the following scheduling policy %d\n",
919 node->dqm->sched_policy);
920
921 goto out;
922
923 node_init_error:
924 node_alloc_error:
925 kfd_cleanup_nodes(kfd, i);
926 kfd_doorbell_fini(kfd);
927 kfd_doorbell_error:
928 kfd_gtt_sa_fini(kfd);
929 kfd_gtt_sa_init_error:
930 amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
931 alloc_gtt_mem_failure:
932 dev_err(kfd_device,
933 "device %x:%x NOT added due to errors\n",
934 kfd->adev->pdev->vendor, kfd->adev->pdev->device);
935 out:
936 return kfd->init_complete;
937 }
938
939 void kgd2kfd_device_exit(struct kfd_dev *kfd)
940 {
941 if (kfd->init_complete) {
942 /* Cleanup KFD nodes */
943 kfd_cleanup_nodes(kfd, kfd->num_nodes);
944 /* Cleanup common/shared resources */
945 kfd_doorbell_fini(kfd);
946 ida_destroy(&kfd->doorbell_ida);
947 kfd_gtt_sa_fini(kfd);
948 amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
949 }
950
951 kfree(kfd);
952 }
953
954 int kgd2kfd_pre_reset(struct kfd_dev *kfd,
955 struct amdgpu_reset_context *reset_context)
956 {
957 struct kfd_node *node;
958 int i;
959
960 if (!kfd->init_complete)
961 return 0;
962
963 for (i = 0; i < kfd->num_nodes; i++) {
964 node = kfd->nodes[i];
965 kfd_smi_event_update_gpu_reset(node, false, reset_context);
966 }
967
968 kgd2kfd_suspend(kfd, false);
969
970 for (i = 0; i < kfd->num_nodes; i++)
971 kfd_signal_reset_event(kfd->nodes[i]);
972
973 return 0;
974 }
975
976 /*
977 * Fix me. KFD won't be able to resume existing processes for now.
978 * We will keep all existing processes in an evicted state and
979 * wait for the processes to be terminated.
980 */
981
982 int kgd2kfd_post_reset(struct kfd_dev *kfd)
983 {
984 int ret;
985 struct kfd_node *node;
986 int i;
987
988 if (!kfd->init_complete)
989 return 0;
990
991 for (i = 0; i < kfd->num_nodes; i++) {
992 ret = kfd_resume(kfd->nodes[i]);
993 if (ret)
994 return ret;
995 }
996
997 mutex_lock(&kfd_processes_mutex);
998 --kfd_locked;
999 mutex_unlock(&kfd_processes_mutex);
1000
1001 for (i = 0; i < kfd->num_nodes; i++) {
1002 node = kfd->nodes[i];
1003 atomic_set(&node->sram_ecc_flag, 0);
1004 kfd_smi_event_update_gpu_reset(node, true, NULL);
1005 }
1006
1007 return 0;
1008 }
1009
1010 bool kfd_is_locked(void)
1011 {
1012 lockdep_assert_held(&kfd_processes_mutex);
1013 return (kfd_locked > 0);
1014 }
1015
1016 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
1017 {
1018 struct kfd_node *node;
1019 int i;
1020
1021 if (!kfd->init_complete)
1022 return;
1023
1024 /* for runtime suspend, skip locking kfd */
1025 if (!run_pm) {
1026 mutex_lock(&kfd_processes_mutex);
1027 /* For first KFD device suspend all the KFD processes */
1028 if (++kfd_locked == 1)
1029 kfd_suspend_all_processes();
1030 mutex_unlock(&kfd_processes_mutex);
1031 }
1032
1033 for (i = 0; i < kfd->num_nodes; i++) {
1034 node = kfd->nodes[i];
1035 node->dqm->ops.stop(node->dqm);
1036 }
1037 }
1038
1039 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
1040 {
1041 int ret, i;
1042
1043 if (!kfd->init_complete)
1044 return 0;
1045
1046 for (i = 0; i < kfd->num_nodes; i++) {
1047 ret = kfd_resume(kfd->nodes[i]);
1048 if (ret)
1049 return ret;
1050 }
1051
1052 /* for runtime resume, skip unlocking kfd */
1053 if (!run_pm) {
1054 mutex_lock(&kfd_processes_mutex);
1055 if (--kfd_locked == 0)
1056 ret = kfd_resume_all_processes();
1057 WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
1058 mutex_unlock(&kfd_processes_mutex);
1059 }
1060
1061 return ret;
1062 }
1063
1064 static int kfd_resume(struct kfd_node *node)
1065 {
1066 int err = 0;
1067
1068 err = node->dqm->ops.start(node->dqm);
1069 if (err)
1070 dev_err(kfd_device,
1071 "Error starting queue manager for device %x:%x\n",
1072 node->adev->pdev->vendor, node->adev->pdev->device);
1073
1074 return err;
1075 }
1076
1077 /* This is called directly from KGD at ISR. */
1078 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1079 {
1080 uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
1081 bool is_patched = false;
1082 unsigned long flags;
1083 struct kfd_node *node;
1084
1085 if (!kfd->init_complete)
1086 return;
1087
1088 if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
1089 dev_err_once(kfd_device, "Ring entry too small\n");
1090 return;
1091 }
1092
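/* Hand the entry to the first node that wants it: copy it into that node's
 * ih ring and kick the node's interrupt worker.
 */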
1093 for (i = 0; i < kfd->num_nodes; i++) {
1094 node = kfd->nodes[i];
1095 spin_lock_irqsave(&node->interrupt_lock, flags);
1096
1097 if (node->interrupts_active
1098 && interrupt_is_wanted(node, ih_ring_entry,
1099 patched_ihre, &is_patched)
1100 && enqueue_ih_ring_entry(node,
1101 is_patched ? patched_ihre : ih_ring_entry)) {
1102 queue_work(node->kfd->ih_wq, &node->interrupt_work);
1103 spin_unlock_irqrestore(&node->interrupt_lock, flags);
1104 return;
1105 }
1106 spin_unlock_irqrestore(&node->interrupt_lock, flags);
1107 }
1108
1109 }
1110
1111 int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
1112 {
1113 struct kfd_process *p;
1114 int r;
1115
1116 /* Because we are called from arbitrary context (workqueue) as opposed
1117 * to process context, kfd_process could attempt to exit while we are
1118 * running so the lookup function increments the process ref count.
1119 */
1120 p = kfd_lookup_process_by_mm(mm);
1121 if (!p)
1122 return -ESRCH;
1123
1124 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
1125 r = kfd_process_evict_queues(p, trigger);
1126
1127 kfd_unref_process(p);
1128 return r;
1129 }
1130
1131 int kgd2kfd_resume_mm(struct mm_struct *mm)
1132 {
1133 struct kfd_process *p;
1134 int r;
1135
1136 /* Because we are called from arbitrary context (workqueue) as opposed
1137 * to process context, kfd_process could attempt to exit while we are
1138 * running so the lookup function increments the process ref count.
1139 */
1140 p = kfd_lookup_process_by_mm(mm);
1141 if (!p)
1142 return -ESRCH;
1143
1144 r = kfd_process_restore_queues(p);
1145
1146 kfd_unref_process(p);
1147 return r;
1148 }
1149
1150 /** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
1151 * prepare for safe eviction of KFD BOs that belong to the specified
1152 * process.
1153 *
1154 * @mm: mm_struct that identifies the specified KFD process
1155 * @fence: eviction fence attached to KFD process BOs
1156 *
1157 */
1158 int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
1159 struct dma_fence *fence)
1160 {
1161 struct kfd_process *p;
1162 unsigned long active_time;
1163 unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
1164
1165 if (!fence)
1166 return -EINVAL;
1167
1168 if (dma_fence_is_signaled(fence))
1169 return 0;
1170
1171 p = kfd_lookup_process_by_mm(mm);
1172 if (!p)
1173 return -ENODEV;
1174
1175 if (fence->seqno == p->last_eviction_seqno)
1176 goto out;
1177
1178 p->last_eviction_seqno = fence->seqno;
1179
1180 /* Avoid KFD process starvation. Wait for at least
1181 * PROCESS_ACTIVE_TIME_MS before evicting the process again
1182 */
1183 active_time = get_jiffies_64() - p->last_restore_timestamp;
1184 if (delay_jiffies > active_time)
1185 delay_jiffies -= active_time;
1186 else
1187 delay_jiffies = 0;
1188
1189 /* During process initialization eviction_work.dwork is initialized
1190 * to kfd_evict_bo_worker
1191 */
1192 WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
1193 p->lead_thread->pid, delay_jiffies);
1194 schedule_delayed_work(&p->eviction_work, delay_jiffies);
1195 out:
1196 kfd_unref_process(p);
1197 return 0;
1198 }
1199
1200 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
1201 unsigned int chunk_size)
1202 {
1203 if (WARN_ON(buf_size < chunk_size))
1204 return -EINVAL;
1205 if (WARN_ON(buf_size == 0))
1206 return -EINVAL;
1207 if (WARN_ON(chunk_size == 0))
1208 return -EINVAL;
1209
1210 kfd->gtt_sa_chunk_size = chunk_size;
1211 kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
1212
1213 kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
1214 GFP_KERNEL);
1215 if (!kfd->gtt_sa_bitmap)
1216 return -ENOMEM;
1217
1218 pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
1219 kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
1220
1221 mutex_init(&kfd->gtt_sa_lock);
1222
1223 return 0;
1224 }
1225
1226 static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
1227 {
1228 mutex_destroy(&kfd->gtt_sa_lock);
1229 bitmap_free(kfd->gtt_sa_bitmap);
1230 }
1231
1232 static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
1233 unsigned int bit_num,
1234 unsigned int chunk_size)
1235 {
1236 return start_addr + bit_num * chunk_size;
1237 }
1238
1239 static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
1240 unsigned int bit_num,
1241 unsigned int chunk_size)
1242 {
1243 return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
1244 }
1245
1246 int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
1247 struct kfd_mem_obj **mem_obj)
1248 {
1249 unsigned int found, start_search, cur_size;
1250 struct kfd_dev *kfd = node->kfd;
1251
1252 if (size == 0)
1253 return -EINVAL;
1254
1255 if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
1256 return -ENOMEM;
1257
1258 *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
1259 if (!(*mem_obj))
1260 return -ENOMEM;
1261
1262 pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
1263
1264 start_search = 0;
1265
1266 mutex_lock(&kfd->gtt_sa_lock);
1267
1268 kfd_gtt_restart_search:
1269 /* Find the first chunk that is free */
1270 found = find_next_zero_bit(kfd->gtt_sa_bitmap,
1271 kfd->gtt_sa_num_of_chunks,
1272 start_search);
1273
1274 pr_debug("Found = %d\n", found);
1275
1276 /* If there wasn't any free chunk, bail out */
1277 if (found == kfd->gtt_sa_num_of_chunks)
1278 goto kfd_gtt_no_free_chunk;
1279
1280 /* Update fields of mem_obj */
1281 (*mem_obj)->range_start = found;
1282 (*mem_obj)->range_end = found;
1283 (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
1284 kfd->gtt_start_gpu_addr,
1285 found,
1286 kfd->gtt_sa_chunk_size);
1287 (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
1288 kfd->gtt_start_cpu_ptr,
1289 found,
1290 kfd->gtt_sa_chunk_size);
1291
1292 pr_debug("gpu_addr = %p, cpu_addr = %p\n",
1293 (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
1294
1295 /* If we need only one chunk, mark it as allocated and get out */
1296 if (size <= kfd->gtt_sa_chunk_size) {
1297 pr_debug("Single bit\n");
1298 __set_bit(found, kfd->gtt_sa_bitmap);
1299 goto kfd_gtt_out;
1300 }
1301
1302 /* Otherwise, try to see if we have enough contiguous chunks */
1303 cur_size = size - kfd->gtt_sa_chunk_size;
1304 do {
1305 (*mem_obj)->range_end =
1306 find_next_zero_bit(kfd->gtt_sa_bitmap,
1307 kfd->gtt_sa_num_of_chunks, ++found);
1308 /*
1309 * If the next free chunk is not contiguous then we need to
1310 * restart our search from the last free chunk we found (which
1311 * wasn't contiguous with the previous ones).
1312 */
1313 if ((*mem_obj)->range_end != found) {
1314 start_search = found;
1315 goto kfd_gtt_restart_search;
1316 }
1317
1318 /*
1319 * If we reached end of buffer, bail out with error
1320 */
1321 if (found == kfd->gtt_sa_num_of_chunks)
1322 goto kfd_gtt_no_free_chunk;
1323
1324 /* Check if we don't need another chunk */
1325 if (cur_size <= kfd->gtt_sa_chunk_size)
1326 cur_size = 0;
1327 else
1328 cur_size -= kfd->gtt_sa_chunk_size;
1329
1330 } while (cur_size > 0);
1331
1332 pr_debug("range_start = %d, range_end = %d\n",
1333 (*mem_obj)->range_start, (*mem_obj)->range_end);
1334
1335 /* Mark the chunks as allocated */
1336 bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
1337 (*mem_obj)->range_end - (*mem_obj)->range_start + 1);
1338
1339 kfd_gtt_out:
1340 mutex_unlock(&kfd->gtt_sa_lock);
1341 return 0;
1342
1343 kfd_gtt_no_free_chunk:
1344 pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
1345 mutex_unlock(&kfd->gtt_sa_lock);
1346 kfree(*mem_obj);
1347 return -ENOMEM;
1348 }
1349
1350 int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
1351 {
1352 struct kfd_dev *kfd = node->kfd;
1353
1354 /* Act like kfree when trying to free a NULL object */
1355 if (!mem_obj)
1356 return 0;
1357
1358 pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
1359 mem_obj, mem_obj->range_start, mem_obj->range_end);
1360
1361 mutex_lock(&kfd->gtt_sa_lock);
1362
1363 /* Mark the chunks as free */
1364 bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
1365 mem_obj->range_end - mem_obj->range_start + 1);
1366
1367 mutex_unlock(&kfd->gtt_sa_lock);
1368
1369 kfree(mem_obj);
1370 return 0;
1371 }
1372
1373 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
1374 {
1375 /*
1376 * TODO: Currently update SRAM ECC flag for first node.
1377 * This needs to be updated later when we can
1378 * identify SRAM ECC error on other nodes also.
1379 */
1380 if (kfd)
1381 atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
1382 }
1383
1384 void kfd_inc_compute_active(struct kfd_node *node)
1385 {
1386 if (atomic_inc_return(&node->kfd->compute_profile) == 1)
1387 amdgpu_amdkfd_set_compute_idle(node->adev, false);
1388 }
1389
1390 void kfd_dec_compute_active(struct kfd_node *node)
1391 {
1392 int count = atomic_dec_return(&node->kfd->compute_profile);
1393
1394 if (count == 0)
1395 amdgpu_amdkfd_set_compute_idle(node->adev, true);
1396 WARN_ONCE(count < 0, "Compute profile ref. count error");
1397 }
1398
1399 static bool kfd_compute_active(struct kfd_node *node)
1400 {
1401 if (atomic_read(&node->kfd->compute_profile))
1402 return true;
1403 return false;
1404 }
1405
1406 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
1407 {
1408 /*
1409 * TODO: For now, raise the throttling event only on first node.
1410 * This will need to change after we are able to determine
1411 * which node raised the throttling event.
1412 */
1413 if (kfd && kfd->init_complete)
1414 kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
1415 throttle_bitmask);
1416 }
1417
1418 /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and
1419 * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA.
1420 * When the device has more than two engines, we reserve two for PCIe to enable
1421 * full-duplex and the rest are used as XGMI.
1422 */
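/* e.g. an XGMI-capable GPU with 8 SDMA instances and a single KFD node exposes
 * 2 PCIe-optimized engines and 6 XGMI engines; without XGMI support all 8
 * count as PCIe.
 */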
1423 unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
1424 {
1425 /* If XGMI is not supported, all SDMA engines are PCIe */
1426 if (!node->adev->gmc.xgmi.supported)
1427 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes;
1428
1429 return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2);
1430 }
1431
1432 unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
1433 {
1434 /* After reserved for PCIe, the rest of engines are XGMI */
1435 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes -
1436 kfd_get_num_sdma_engines(node);
1437 }
1438
1439 int kgd2kfd_check_and_lock_kfd(void)
1440 {
1441 mutex_lock(&kfd_processes_mutex);
1442 if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
1443 mutex_unlock(&kfd_processes_mutex);
1444 return -EBUSY;
1445 }
1446
1447 ++kfd_locked;
1448 mutex_unlock(&kfd_processes_mutex);
1449
1450 return 0;
1451 }
1452
1453 void kgd2kfd_unlock_kfd(void)
1454 {
1455 mutex_lock(&kfd_processes_mutex);
1456 --kfd_locked;
1457 mutex_unlock(&kfd_processes_mutex);
1458 }
1459
1460 int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
1461 {
1462 struct kfd_node *node;
1463 int ret;
1464
1465 if (!kfd->init_complete)
1466 return 0;
1467
1468 if (node_id >= kfd->num_nodes) {
1469 dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
1470 node_id, kfd->num_nodes - 1);
1471 return -EINVAL;
1472 }
1473 node = kfd->nodes[node_id];
1474
1475 ret = node->dqm->ops.unhalt(node->dqm);
1476 if (ret)
1477 dev_err(kfd_device, "Error in starting scheduler\n");
1478
1479 return ret;
1480 }
1481
1482 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
1483 {
1484 struct kfd_node *node;
1485
1486 if (!kfd->init_complete)
1487 return 0;
1488
1489 if (node_id >= kfd->num_nodes) {
1490 dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
1491 node_id, kfd->num_nodes - 1);
1492 return -EINVAL;
1493 }
1494
1495 node = kfd->nodes[node_id];
1496 return node->dqm->ops.halt(node->dqm);
1497 }
1498
1499 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
1500 {
1501 struct kfd_node *node;
1502
1503 if (!kfd->init_complete)
1504 return false;
1505
1506 if (node_id >= kfd->num_nodes) {
1507 dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
1508 node_id, kfd->num_nodes - 1);
1509 return false;
1510 }
1511
1512 node = kfd->nodes[node_id];
1513
1514 return kfd_compute_active(node);
1515 }
1516
1517 /**
1518 * kgd2kfd_vmfault_fast_path() - KFD vm page fault interrupt handling fast path for gmc v9
1519 * @adev: amdgpu device
1520 * @entry: vm fault interrupt vector
1521 * @retry_fault: if this is retry fault
1522 *
1523 * retry fault -
1524 * with CAM enabled, adev primary ring
1525 * | gmc_v9_0_process_interrupt()
1526 * adev soft_ring
1527 * | gmc_v9_0_process_interrupt() worker failed to recover page fault
1528 * KFD node ih_fifo
1529 * | KFD interrupt_wq worker
1530 * kfd_signal_vm_fault_event
1531 *
1532 * without CAM, adev primary ring1
1533 * | gmc_v9_0_process_interrupt worker failed to recover page fault
1534 * KFD node ih_fifo
1535 * | KFD interrupt_wq worker
1536 * kfd_signal_vm_fault_event
1537 *
1538 * no-retry fault -
1539 * adev primary ring
1540 * | gmc_v9_0_process_interrupt()
1541 * KFD node ih_fifo
1542 * | KFD interrupt_wq worker
1543 * kfd_signal_vm_fault_event
1544 *
1545 * fast path - After kfd_signal_vm_fault_event, gmc_v9_0_process_interrupt drops page faults
1546 * of the same process and does not copy the interrupt to the KFD node ih_fifo.
1547 * With the gdb debugger enabled, the retry fault must be converted to a no-retry fault
1548 * for the debugger, so the fast path cannot be used.
1549 *
1550 * Return:
1551 * true - use the fast path to handle this fault
1552 * false - use normal path to handle it
1553 */
1554 bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
1555 bool retry_fault)
1556 {
1557 struct kfd_process *p;
1558 u32 cam_index;
1559
1560 if (entry->ih == &adev->irq.ih_soft || entry->ih == &adev->irq.ih1) {
1561 p = kfd_lookup_process_by_pasid(entry->pasid);
1562 if (!p)
1563 return true;
1564
1565 if (p->gpu_page_fault && !p->debug_trap_enabled) {
1566 if (retry_fault && adev->irq.retry_cam_enabled) {
1567 cam_index = entry->src_data[2] & 0x3ff;
1568 WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
1569 }
1570
1571 kfd_unref_process(p);
1572 return true;
1573 }
1574
1575 /*
1576 * This is the first page fault, set flag and then signal user space
1577 */
1578 p->gpu_page_fault = true;
1579 kfd_unref_process(p);
1580 }
1581 return false;
1582 }
1583
1584 #if defined(CONFIG_DEBUG_FS)
1585
1586 /* This function will send a package to HIQ to hang the HWS
1587 * which will trigger a GPU reset and bring the HWS back to normal state
1588 */
1589 int kfd_debugfs_hang_hws(struct kfd_node *dev)
1590 {
1591 if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
1592 pr_err("HWS is not enabled");
1593 return -EINVAL;
1594 }
1595
1596 return dqm_debugfs_hang_hws(dev->dqm);
1597 }
1598
1599 #endif
1600