1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26 #include <linux/firmware.h>
27 #include <linux/pm_runtime.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_gfx.h"
31 #include "amdgpu_rlc.h"
32 #include "amdgpu_ras.h"
33 #include "amdgpu_reset.h"
34 #include "amdgpu_xcp.h"
35 #include "amdgpu_xgmi.h"
36 #include "amdgpu_mes.h"
37 #include "nvd.h"
38
39 /* delay 0.1 second to enable gfx off feature */
40 #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
41
42 #define GFX_OFF_NO_DELAY 0
43
44 /*
45 * GPU GFX IP block helper functions.
46 */
47
48 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
49 int pipe, int queue)
50 {
51 int bit = 0;
52
53 bit += mec * adev->gfx.mec.num_pipe_per_mec
54 * adev->gfx.mec.num_queue_per_pipe;
55 bit += pipe * adev->gfx.mec.num_queue_per_pipe;
56 bit += queue;
57
58 return bit;
59 }
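
/*
 * Illustrative worked example (the 4x8 layout below is an assumption, not
 * taken from this file): with num_pipe_per_mec = 4 and num_queue_per_pipe = 8,
 * the linear bit computed above for mec = 1, pipe = 2, queue = 3 is
 *
 *   bit = 1 * 4 * 8 + 2 * 8 + 3 = 51
 *
 * amdgpu_queue_mask_bit_to_mec_queue() below performs the inverse mapping.
 */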
60
61 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
62 int *mec, int *pipe, int *queue)
63 {
64 *queue = bit % adev->gfx.mec.num_queue_per_pipe;
65 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
66 % adev->gfx.mec.num_pipe_per_mec;
67 *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
68 / adev->gfx.mec.num_pipe_per_mec;
69
70 }
71
72 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
73 int xcc_id, int mec, int pipe, int queue)
74 {
75 return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
76 adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
77 }
78
79 static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
80 int me, int pipe, int queue)
81 {
82 int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
83 int bit = 0;
84
85 bit += me * adev->gfx.me.num_pipe_per_me
86 * num_queue_per_pipe;
87 bit += pipe * num_queue_per_pipe;
88 bit += queue;
89
90 return bit;
91 }
92
93 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
94 int me, int pipe, int queue)
95 {
96 return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
97 adev->gfx.me.queue_bitmap);
98 }
99
100 /**
101 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
102 *
103 * @adev: amdgpu device pointer
104 * @mask: array in which the per-shader array disable masks will be stored
105 * @max_se: number of SEs
106 * @max_sh: number of SHs
107 *
108 * The bitmask of CUs to be disabled in the shader array determined by se and
109 * sh is stored in mask[se * max_sh + sh].
110 */
111 void amdgpu_gfx_parse_disable_cu(struct amdgpu_device *adev, unsigned int *mask,
112 unsigned int max_se, unsigned int max_sh)
113 {
114 unsigned int se, sh, cu;
115 const char *p;
116
117 memset(mask, 0, sizeof(*mask) * max_se * max_sh);
118
119 if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
120 return;
121
122 p = amdgpu_disable_cu;
123 for (;;) {
124 char *next;
125 int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
126
127 if (ret < 3) {
128 drm_err(adev_to_drm(adev), "could not parse disable_cu\n");
129 return;
130 }
131
132 if (se < max_se && sh < max_sh && cu < 16) {
133 drm_info(adev_to_drm(adev), "Disabling CU %u.%u.%u\n", se, sh, cu);
134 mask[se * max_sh + sh] |= 1u << cu;
135 } else {
136 drm_err(adev_to_drm(adev), "disable_cu %u.%u.%u is out of range\n",
137 se, sh, cu);
138 }
139
140 next = strchr(p, ',');
141 if (!next)
142 break;
143 p = next + 1;
144 }
145 }
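
/*
 * Illustrative usage of the parser above (the values are hypothetical):
 * booting with amdgpu.disable_cu=2.0.3,2.0.4 disables CU 3 and CU 4 in
 * SE 2 / SH 0, i.e. mask[2 * max_sh + 0] ends up with bits 3 and 4 set.
 * Entries are "se.sh.cu" triples separated by commas.
 */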
146
147 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
148 {
149 return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
150 }
151
152 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
153 {
154 if (amdgpu_compute_multipipe != -1) {
155 dev_info(adev->dev, " forcing compute pipe policy %d\n",
156 amdgpu_compute_multipipe);
157 return amdgpu_compute_multipipe == 1;
158 }
159
160 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
161 return true;
162
163 /* FIXME: spreading the queues across pipes causes perf regressions
164 * on POLARIS11 compute workloads */
165 if (adev->asic_type == CHIP_POLARIS11)
166 return false;
167
168 return adev->gfx.mec.num_mec > 1;
169 }
170
171 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
172 struct amdgpu_ring *ring)
173 {
174 int queue = ring->queue;
175 int pipe = ring->pipe;
176
177 /* Policy: use pipe1 queue0 as high priority graphics queue if we
178 * have more than one gfx pipe.
179 */
180 if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
181 adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
182 int me = ring->me;
183 int bit;
184
185 bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
186 if (ring == &adev->gfx.gfx_ring[bit])
187 return true;
188 }
189
190 return false;
191 }
192
193 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
194 struct amdgpu_ring *ring)
195 {
196 /* Policy: use 1st queue as high priority compute queue if we
197 * have more than one compute queue.
198 */
199 if (adev->gfx.num_compute_rings > 1 &&
200 ring == &adev->gfx.compute_ring[0])
201 return true;
202
203 return false;
204 }
205
206 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
207 {
208 int i, j, queue, pipe;
209 bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
210 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
211 adev->gfx.mec.num_queue_per_pipe,
212 adev->gfx.num_compute_rings);
213 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
214
215 if (multipipe_policy) {
216 /* policy: spread queues evenly across all pipes on MEC1 only;
217 * for multiple XCCs, just use the original policy for simplicity */
218 for (j = 0; j < num_xcc; j++) {
219 for (i = 0; i < max_queues_per_mec; i++) {
220 pipe = i % adev->gfx.mec.num_pipe_per_mec;
221 queue = (i / adev->gfx.mec.num_pipe_per_mec) %
222 adev->gfx.mec.num_queue_per_pipe;
223
224 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
225 adev->gfx.mec_bitmap[j].queue_bitmap);
226 }
227 }
228 } else {
229 /* policy: amdgpu owns all queues in the given pipe */
230 for (j = 0; j < num_xcc; j++) {
231 for (i = 0; i < max_queues_per_mec; ++i)
232 set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
233 }
234 }
235
236 for (j = 0; j < num_xcc; j++) {
237 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
238 bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
239 }
240 }
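
/*
 * Worked example for the multipipe policy above (assuming 4 pipes per MEC,
 * 8 queues per pipe and 8 compute rings): ring index i maps to (pipe, queue)
 * as i=0 -> (0,0), i=1 -> (1,0), i=2 -> (2,0), i=3 -> (3,0), i=4 -> (0,1), ...
 * so the enabled queues are spread round-robin across the pipes of MEC1
 * instead of filling one pipe first.
 */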
241
242 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
243 {
244 int i, queue, pipe;
245 bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
246 int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
247 int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
248
249 if (multipipe_policy) {
250 /* policy: amdgpu owns the first queue per pipe at this stage
251 * will extend to multiple queues per pipe later */
252 for (i = 0; i < max_queues_per_me; i++) {
253 pipe = i % adev->gfx.me.num_pipe_per_me;
254 queue = (i / adev->gfx.me.num_pipe_per_me) %
255 num_queue_per_pipe;
256
257 set_bit(pipe * num_queue_per_pipe + queue,
258 adev->gfx.me.queue_bitmap);
259 }
260 } else {
261 for (i = 0; i < max_queues_per_me; ++i)
262 set_bit(i, adev->gfx.me.queue_bitmap);
263 }
264
265 /* update the number of active graphics rings */
266 if (adev->gfx.num_gfx_rings)
267 adev->gfx.num_gfx_rings =
268 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
269 }
270
271 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
272 struct amdgpu_ring *ring, int xcc_id)
273 {
274 int queue_bit;
275 int mec, pipe, queue;
276
277 queue_bit = adev->gfx.mec.num_mec
278 * adev->gfx.mec.num_pipe_per_mec
279 * adev->gfx.mec.num_queue_per_pipe;
280
281 while (--queue_bit >= 0) {
282 if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
283 continue;
284
285 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
286
287 /*
288 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
289 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
290 * only can be issued on queue 0.
291 */
292 if ((mec == 1 && pipe > 1) || queue != 0)
293 continue;
294
295 ring->me = mec + 1;
296 ring->pipe = pipe;
297 ring->queue = queue;
298
299 return 0;
300 }
301
302 dev_err(adev->dev, "Failed to find a queue for KIQ\n");
303 return -EINVAL;
304 }
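
/*
 * Illustrative example of the selection above (assumes 2 MECs, 4 pipes per
 * MEC, 8 queues per pipe and a compute queue bitmap that only covers MEC1):
 * the scan runs from the highest bit downwards, bits with queue != 0 or on
 * MEC2 pipes 2/3 are rejected, so the first acceptable candidate is mec = 1,
 * pipe = 1, queue = 0 and the KIQ ends up on MEC2/pipe1/queue0 (ring->me = 2).
 */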
305
306 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
307 {
308 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
309 struct amdgpu_irq_src *irq = &kiq->irq;
310 struct amdgpu_ring *ring = &kiq->ring;
311 int r = 0;
312
313 spin_lock_init(&kiq->ring_lock);
314
315 ring->adev = NULL;
316 ring->ring_obj = NULL;
317 ring->use_doorbell = true;
318 ring->xcc_id = xcc_id;
319 ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
320 ring->doorbell_index =
321 (adev->doorbell_index.kiq +
322 xcc_id * adev->doorbell_index.xcc_doorbell_range)
323 << 1;
324
325 r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
326 if (r)
327 return r;
328
329 ring->eop_gpu_addr = kiq->eop_gpu_addr;
330 ring->no_scheduler = true;
331 snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu",
332 (unsigned char)xcc_id, (unsigned char)ring->me,
333 (unsigned char)ring->pipe, (unsigned char)ring->queue);
334 r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
335 AMDGPU_RING_PRIO_DEFAULT, NULL);
336 if (r)
337 dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
338
339 return r;
340 }
341
342 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
343 {
344 amdgpu_ring_fini(ring);
345 }
346
347 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
348 {
349 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
350
351 amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
352 }
353
354 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
355 unsigned int hpd_size, int xcc_id)
356 {
357 int r;
358 u32 *hpd;
359 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
360
361 r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
362 AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
363 &kiq->eop_gpu_addr, (void **)&hpd);
364 if (r) {
365 dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
366 return r;
367 }
368
369 memset(hpd, 0, hpd_size);
370
371 r = amdgpu_bo_reserve(kiq->eop_obj, true);
372 if (unlikely(r != 0))
373 dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
374 amdgpu_bo_kunmap(kiq->eop_obj);
375 amdgpu_bo_unreserve(kiq->eop_obj);
376
377 return 0;
378 }
379
380 /* create MQD for each compute/gfx queue */
381 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
382 unsigned int mqd_size, int xcc_id)
383 {
384 int r, i, j;
385 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
386 struct amdgpu_ring *ring = &kiq->ring;
387 u32 domain = AMDGPU_GEM_DOMAIN_GTT;
388 u32 gfx_mqd_size = max(adev->mqds[AMDGPU_HW_IP_GFX].mqd_size, mqd_size);
389 u32 compute_mqd_size = max(adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size, mqd_size);
390
391 #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
392 /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
393 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
394 domain |= AMDGPU_GEM_DOMAIN_VRAM;
395 #endif
396
397 /* create MQD for KIQ */
398 if (!adev->enable_mes_kiq && !ring->mqd_obj) {
399 /* Originally the KIQ MQD was put in the GTT domain, but for SRIOV the VRAM
400 * domain is a must, otherwise the hypervisor triggers a SAVE_VF failure after
401 * the driver is unloaded (the MQD is deallocated and the GART unbound). To
402 * avoid divergence, use the VRAM domain for the KIQ MQD on both SRIOV and bare-metal.
403 */
404 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
405 AMDGPU_GEM_DOMAIN_VRAM |
406 AMDGPU_GEM_DOMAIN_GTT,
407 &ring->mqd_obj,
408 &ring->mqd_gpu_addr,
409 &ring->mqd_ptr);
410 if (r) {
411 dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
412 return r;
413 }
414
415 /* prepare MQD backup */
416 kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
417 if (!kiq->mqd_backup) {
418 dev_warn(adev->dev,
419 "no memory to create MQD backup for ring %s\n", ring->name);
420 return -ENOMEM;
421 }
422 }
423
424 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
425 /* create MQD for each KGQ */
426 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
427 ring = &adev->gfx.gfx_ring[i];
428 if (!ring->mqd_obj) {
429 r = amdgpu_bo_create_kernel(adev, AMDGPU_MQD_SIZE_ALIGN(gfx_mqd_size),
430 PAGE_SIZE, domain, &ring->mqd_obj,
431 &ring->mqd_gpu_addr, &ring->mqd_ptr);
432 if (r) {
433 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
434 return r;
435 }
436
437 ring->mqd_size = gfx_mqd_size;
438 /* prepare MQD backup */
439 adev->gfx.me.mqd_backup[i] = kzalloc(gfx_mqd_size, GFP_KERNEL);
440 if (!adev->gfx.me.mqd_backup[i]) {
441 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
442 return -ENOMEM;
443 }
444 }
445 }
446 }
447
448 /* create MQD for each KCQ */
449 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
450 j = i + xcc_id * adev->gfx.num_compute_rings;
451 ring = &adev->gfx.compute_ring[j];
452 if (!ring->mqd_obj) {
453 r = amdgpu_bo_create_kernel(adev, AMDGPU_MQD_SIZE_ALIGN(compute_mqd_size),
454 PAGE_SIZE, domain, &ring->mqd_obj,
455 &ring->mqd_gpu_addr, &ring->mqd_ptr);
456 if (r) {
457 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
458 return r;
459 }
460
461 ring->mqd_size = compute_mqd_size;
462 /* prepare MQD backup */
463 adev->gfx.mec.mqd_backup[j] = kzalloc(compute_mqd_size, GFP_KERNEL);
464 if (!adev->gfx.mec.mqd_backup[j]) {
465 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
466 return -ENOMEM;
467 }
468 }
469 }
470
471 return 0;
472 }
473
474 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
475 {
476 struct amdgpu_ring *ring = NULL;
477 int i, j;
478 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
479
480 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
481 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
482 ring = &adev->gfx.gfx_ring[i];
483 kfree(adev->gfx.me.mqd_backup[i]);
484 amdgpu_bo_free_kernel(&ring->mqd_obj,
485 &ring->mqd_gpu_addr,
486 &ring->mqd_ptr);
487 }
488 }
489
490 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
491 j = i + xcc_id * adev->gfx.num_compute_rings;
492 ring = &adev->gfx.compute_ring[j];
493 kfree(adev->gfx.mec.mqd_backup[j]);
494 amdgpu_bo_free_kernel(&ring->mqd_obj,
495 &ring->mqd_gpu_addr,
496 &ring->mqd_ptr);
497 }
498
499 ring = &kiq->ring;
500 kfree(kiq->mqd_backup);
501 amdgpu_bo_free_kernel(&ring->mqd_obj,
502 &ring->mqd_gpu_addr,
503 &ring->mqd_ptr);
504 }
505
506 void amdgpu_gfx_mqd_symmetrically_map_cu_mask(struct amdgpu_device *adev, const uint32_t *cu_mask,
507 uint32_t cu_mask_count, uint32_t *se_mask)
508 {
509 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
510 struct amdgpu_gfx_config *gfx_info = &adev->gfx.config;
511 uint32_t cu_per_sh[8][4] = {0};
512 int i, se, sh, cu, cu_bitmap_sh_mul;
513 int xcc_inst = ffs(adev->gfx.xcc_mask) - 1;
514 bool wgp_mode_req = amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0);
515 int cu_inc = wgp_mode_req ? 2 : 1;
516 uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
517 int num_xcc, inc, inst = 0;
518
519 if (xcc_inst < 0)
520 xcc_inst = 0;
521
522 num_xcc = hweight16(adev->gfx.xcc_mask);
523 if (!num_xcc)
524 num_xcc = 1;
525
526 inc = cu_inc * num_xcc;
527
528 cu_bitmap_sh_mul = 2;
529
530 for (se = 0; se < gfx_info->max_shader_engines; se++)
531 for (sh = 0; sh < gfx_info->max_sh_per_se; sh++)
532 cu_per_sh[se][sh] = hweight32(
533 cu_info->bitmap[xcc_inst][se % 4][sh + (se / 4) *
534 cu_bitmap_sh_mul]);
535
536 for (i = 0; i < gfx_info->max_shader_engines; i++)
537 se_mask[i] = 0;
538
539 i = inst;
540 for (cu = 0; cu < 16; cu += cu_inc) {
541 for (sh = 0; sh < gfx_info->max_sh_per_se; sh++) {
542 for (se = 0; se < gfx_info->max_shader_engines; se++) {
543 if (cu_per_sh[se][sh] > cu) {
544 if ((i / 32) < cu_mask_count && (cu_mask[i / 32] & (1 << (i % 32))))
545 se_mask[se] |= en_mask << (cu + sh * 16);
546 i += inc;
547 if (i >= cu_mask_count * 32)
548 return;
549 }
550 }
551 }
552 }
553 }
554
555 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
556 {
557 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
558 struct amdgpu_ring *kiq_ring = &kiq->ring;
559 int i, r = 0;
560 int j;
561
562 if (adev->enable_mes) {
563 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
564 j = i + xcc_id * adev->gfx.num_compute_rings;
565 amdgpu_mes_unmap_legacy_queue(adev,
566 &adev->gfx.compute_ring[j],
567 RESET_QUEUES, 0, 0, xcc_id);
568 }
569 return 0;
570 }
571
572 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
573 return -EINVAL;
574
575 if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
576 return 0;
577
578 spin_lock(&kiq->ring_lock);
579 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
580 adev->gfx.num_compute_rings)) {
581 spin_unlock(&kiq->ring_lock);
582 return -ENOMEM;
583 }
584
585 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
586 j = i + xcc_id * adev->gfx.num_compute_rings;
587 kiq->pmf->kiq_unmap_queues(kiq_ring,
588 &adev->gfx.compute_ring[j],
589 RESET_QUEUES, 0, 0);
590 }
591 /* Submit unmap queue packet */
592 amdgpu_ring_commit(kiq_ring);
593 /*
594 * Ring test will do a basic scratch register change check. Just run
595 * this to ensure that the unmap-queues packet submitted above was
596 * processed successfully before returning.
597 */
598 r = amdgpu_ring_test_helper(kiq_ring);
599
600 spin_unlock(&kiq->ring_lock);
601
602 return r;
603 }
604
605 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
606 {
607 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
608 struct amdgpu_ring *kiq_ring = &kiq->ring;
609 int i, r = 0;
610 int j;
611
612 if (adev->enable_mes) {
613 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
614 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
615 j = i + xcc_id * adev->gfx.num_gfx_rings;
616 amdgpu_mes_unmap_legacy_queue(adev,
617 &adev->gfx.gfx_ring[j],
618 PREEMPT_QUEUES, 0, 0, xcc_id);
619 }
620 }
621 return 0;
622 }
623
624 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
625 return -EINVAL;
626
627 if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
628 return 0;
629
630 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
631 spin_lock(&kiq->ring_lock);
632 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
633 adev->gfx.num_gfx_rings)) {
634 spin_unlock(&kiq->ring_lock);
635 return -ENOMEM;
636 }
637
638 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
639 j = i + xcc_id * adev->gfx.num_gfx_rings;
640 kiq->pmf->kiq_unmap_queues(kiq_ring,
641 &adev->gfx.gfx_ring[j],
642 PREEMPT_QUEUES, 0, 0);
643 }
644 /* Submit unmap queue packet */
645 amdgpu_ring_commit(kiq_ring);
646
647 /*
648 * Ring test will do a basic scratch register change check.
649 * Just run this to ensure that the unmap-queues packet submitted
650 * above was processed successfully before returning.
651 */
652 r = amdgpu_ring_test_helper(kiq_ring);
653 spin_unlock(&kiq->ring_lock);
654 }
655
656 return r;
657 }
658
659 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
660 int queue_bit)
661 {
662 int mec, pipe, queue;
663 int set_resource_bit = 0;
664
665 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
666
667 set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
668
669 return set_resource_bit;
670 }
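
/*
 * Worked example for the fixed 4x8 layout assumed by SET_RESOURCES above
 * (4 pipes per MEC and 8 queues per pipe are assumptions): queue_bit 51
 * decodes to mec = 1, pipe = 2, queue = 3, so the resource bit is
 *
 *   1 * 4 * 8 + 2 * 8 + 3 = 51
 *
 * With that layout the mapping is the identity; it only differs when the
 * hardware exposes a different pipe/queue count than the packet format.
 */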
671
672 static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
673 {
674 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
675 struct amdgpu_ring *kiq_ring = &kiq->ring;
676 uint64_t queue_mask = ~0ULL;
677 int r, i, j;
678
679 amdgpu_device_flush_hdp(adev, NULL);
680
681 if (!adev->enable_uni_mes) {
682 spin_lock(&kiq->ring_lock);
683 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
684 if (r) {
685 dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
686 spin_unlock(&kiq->ring_lock);
687 return r;
688 }
689
690 kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
691 r = amdgpu_ring_test_helper(kiq_ring);
692 spin_unlock(&kiq->ring_lock);
693 if (r)
694 dev_err(adev->dev, "KIQ failed to set resources\n");
695 }
696
697 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
698 j = i + xcc_id * adev->gfx.num_compute_rings;
699 r = amdgpu_mes_map_legacy_queue(adev,
700 &adev->gfx.compute_ring[j],
701 xcc_id);
702 if (r) {
703 dev_err(adev->dev, "failed to map compute queue\n");
704 return r;
705 }
706 }
707
708 return 0;
709 }
710
711 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
712 {
713 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
714 struct amdgpu_ring *kiq_ring = &kiq->ring;
715 uint64_t queue_mask = 0;
716 int r, i, j;
717
718 if (adev->mes.enable_legacy_queue_map)
719 return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
720
721 if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
722 return -EINVAL;
723
724 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
725 if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
726 continue;
727
728 /* This situation may be hit in the future if a new HW
729 * generation exposes more than 64 queues. If so, the
730 * definition of queue_mask needs updating */
731 if (WARN_ON(i > (sizeof(queue_mask)*8))) {
732 dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
733 break;
734 }
735
736 queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
737 }
738
739 amdgpu_device_flush_hdp(adev, NULL);
740
741 dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
742 kiq_ring->pipe, kiq_ring->queue);
743
744 spin_lock(&kiq->ring_lock);
745 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
746 adev->gfx.num_compute_rings +
747 kiq->pmf->set_resources_size);
748 if (r) {
749 dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
750 spin_unlock(&kiq->ring_lock);
751 return r;
752 }
753
754 kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
755 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
756 j = i + xcc_id * adev->gfx.num_compute_rings;
757 kiq->pmf->kiq_map_queues(kiq_ring,
758 &adev->gfx.compute_ring[j]);
759 }
760 /* Submit map queue packet */
761 amdgpu_ring_commit(kiq_ring);
762 /*
763 * Ring test will do a basic scratch register change check. Just run
764 * this to ensure that the map-queues packet submitted above was
765 * processed successfully before returning.
766 */
767 r = amdgpu_ring_test_helper(kiq_ring);
768 spin_unlock(&kiq->ring_lock);
769 if (r)
770 dev_err(adev->dev, "KCQ enable failed\n");
771
772 return r;
773 }
774
775 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
776 {
777 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
778 struct amdgpu_ring *kiq_ring = &kiq->ring;
779 int r, i, j;
780
781 if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
782 return -EINVAL;
783
784 amdgpu_device_flush_hdp(adev, NULL);
785
786 if (adev->mes.enable_legacy_queue_map) {
787 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
788 j = i + xcc_id * adev->gfx.num_gfx_rings;
789 r = amdgpu_mes_map_legacy_queue(adev,
790 &adev->gfx.gfx_ring[j],
791 xcc_id);
792 if (r) {
793 dev_err(adev->dev, "failed to map gfx queue\n");
794 return r;
795 }
796 }
797
798 return 0;
799 }
800
801 spin_lock(&kiq->ring_lock);
802 /* No need to map kgq on the slave */
803 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
804 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
805 adev->gfx.num_gfx_rings);
806 if (r) {
807 dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
808 spin_unlock(&kiq->ring_lock);
809 return r;
810 }
811
812 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
813 j = i + xcc_id * adev->gfx.num_gfx_rings;
814 kiq->pmf->kiq_map_queues(kiq_ring,
815 &adev->gfx.gfx_ring[j]);
816 }
817 }
818 /* Submit map queue packet */
819 amdgpu_ring_commit(kiq_ring);
820 /*
821 * Ring test will do a basic scratch register change check. Just run
822 * this to ensure that the map-queues packet submitted above was
823 * processed successfully before returning.
824 */
825 r = amdgpu_ring_test_helper(kiq_ring);
826 spin_unlock(&kiq->ring_lock);
827 if (r)
828 dev_err(adev->dev, "KGQ enable failed\n");
829
830 return r;
831 }
832
833 static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable,
834 bool no_delay)
835 {
836 unsigned long delay = GFX_OFF_DELAY_ENABLE;
837
838 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
839 return;
840
841 mutex_lock(&adev->gfx.gfx_off_mutex);
842
843 if (enable) {
844 /* If the count is already 0, it means there's an imbalance bug somewhere.
845 * Note that the bug may be in a different caller than the one which triggers the
846 * WARN_ON_ONCE.
847 */
848 if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
849 goto unlock;
850
851 adev->gfx.gfx_off_req_count--;
852
853 if (adev->gfx.gfx_off_req_count == 0 &&
854 !adev->gfx.gfx_off_state) {
855 /* If going to s2idle, no need to wait */
856 if (no_delay) {
857 if (!amdgpu_dpm_set_powergating_by_smu(adev,
858 AMD_IP_BLOCK_TYPE_GFX, true, 0))
859 adev->gfx.gfx_off_state = true;
860 } else {
861 schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
862 delay);
863 }
864 }
865 } else {
866 if (adev->gfx.gfx_off_req_count == 0) {
867 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
868
869 if (adev->gfx.gfx_off_state &&
870 !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
871 adev->gfx.gfx_off_state = false;
872
873 if (adev->gfx.funcs->init_spm_golden) {
874 dev_dbg(adev->dev,
875 "GFXOFF is disabled, re-init SPM golden settings\n");
876 amdgpu_gfx_init_spm_golden(adev);
877 }
878 }
879 }
880
881 adev->gfx.gfx_off_req_count++;
882 }
883
884 unlock:
885 mutex_unlock(&adev->gfx.gfx_off_mutex);
886 }
887
888 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
889 *
890 * @adev: amdgpu_device pointer
891 * @enable: true to enable the gfx off feature, false to disable it
892 *
893 * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
894 * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
895 * 3. Other clients can cancel their requests to disable the gfx off feature.
896 * 4. Other clients should not request enabling the gfx off feature before requesting to disable it.
897 *
898 * gfx off allow will be delayed by GFX_OFF_DELAY_ENABLE ms.
899 */
900 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
901 {
902 /* If going to s2idle, no need to wait */
903 bool no_delay = adev->in_s0ix ? true : false;
904
905 amdgpu_gfx_do_off_ctrl(adev, enable, no_delay);
906 }
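
/*
 * Typical usage sketch (illustrative, not taken from a specific caller):
 * code that needs the GFX block to stay powered, e.g. around direct register
 * access, brackets the access with a disable/enable pair. Requests are
 * reference counted through gfx_off_req_count, so every "disable" must be
 * balanced by a matching "enable":
 *
 *   amdgpu_gfx_off_ctrl(adev, false);  // keep GFX powered on
 *   ... touch GFX registers ...
 *   amdgpu_gfx_off_ctrl(adev, true);   // drop the request; GFXOFF may be
 *                                      // re-allowed after GFX_OFF_DELAY_ENABLE
 */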
907
908 /* amdgpu_gfx_off_ctrl_immediate - Handle gfx off feature enable/disable
909 *
910 * @adev: amdgpu_device pointer
911 * @enable: true to enable the gfx off feature, false to disable it
912 *
913 * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
914 * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
915 * 3. Other clients can cancel their requests to disable the gfx off feature.
916 * 4. Other clients should not request enabling the gfx off feature before requesting to disable it.
917 *
918 * gfx off allow will be issued immediately.
919 */
920 void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable)
921 {
922 amdgpu_gfx_do_off_ctrl(adev, enable, true);
923 }
924
925 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
926 {
927 int r = 0;
928
929 mutex_lock(&adev->gfx.gfx_off_mutex);
930
931 r = amdgpu_dpm_set_residency_gfxoff(adev, value);
932
933 mutex_unlock(&adev->gfx.gfx_off_mutex);
934
935 return r;
936 }
937
938 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
939 {
940 int r = 0;
941
942 mutex_lock(&adev->gfx.gfx_off_mutex);
943
944 r = amdgpu_dpm_get_residency_gfxoff(adev, value);
945
946 mutex_unlock(&adev->gfx.gfx_off_mutex);
947
948 return r;
949 }
950
951 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
952 {
953 int r = 0;
954
955 mutex_lock(&adev->gfx.gfx_off_mutex);
956
957 r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
958
959 mutex_unlock(&adev->gfx.gfx_off_mutex);
960
961 return r;
962 }
963
964 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
965 {
966
967 int r = 0;
968
969 mutex_lock(&adev->gfx.gfx_off_mutex);
970
971 r = amdgpu_dpm_get_status_gfxoff(adev, value);
972
973 mutex_unlock(&adev->gfx.gfx_off_mutex);
974
975 return r;
976 }
977
978 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
979 {
980 int r;
981
982 if (amdgpu_ras_is_supported(adev, ras_block->block)) {
983 if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
984 r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
985 if (r)
986 return r;
987 }
988
989 r = amdgpu_ras_block_late_init(adev, ras_block);
990 if (r)
991 return r;
992
993 if (amdgpu_sriov_vf(adev))
994 return r;
995
996 if (adev->gfx.cp_ecc_error_irq.funcs) {
997 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
998 if (r)
999 goto late_fini;
1000 }
1001 } else {
1002 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
1003 }
1004
1005 return 0;
1006 late_fini:
1007 amdgpu_ras_block_late_fini(adev, ras_block);
1008 return r;
1009 }
1010
1011 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
1012 {
1013 int err = 0;
1014 struct amdgpu_gfx_ras *ras = NULL;
1015
1016 /* If adev->gfx.ras is NULL, gfx does not support the RAS
1017 * function, so there is nothing to do here.
1018 */
1019 if (!adev->gfx.ras)
1020 return 0;
1021
1022 ras = adev->gfx.ras;
1023
1024 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
1025 if (err) {
1026 dev_err(adev->dev, "Failed to register gfx ras block!\n");
1027 return err;
1028 }
1029
1030 strcpy(ras->ras_block.ras_comm.name, "gfx");
1031 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
1032 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
1033 adev->gfx.ras_if = &ras->ras_block.ras_comm;
1034
1035 /* If no special ras_late_init function is defined, use the default gfx ras_late_init */
1036 if (!ras->ras_block.ras_late_init)
1037 ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
1038
1039 /* If no special ras_cb function is defined, use the default ras_cb */
1040 if (!ras->ras_block.ras_cb)
1041 ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
1042
1043 return 0;
1044 }
1045
1046 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
1047 struct amdgpu_iv_entry *entry)
1048 {
1049 if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
1050 return adev->gfx.ras->poison_consumption_handler(adev, entry);
1051
1052 return 0;
1053 }
1054
1055 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
1056 void *err_data,
1057 struct amdgpu_iv_entry *entry)
1058 {
1059 /* TODO ue will trigger an interrupt.
1060 *
1061 * When “Full RAS” is enabled, the per-IP interrupt sources should
1062 * be disabled and the driver should only look for the aggregated
1063 * interrupt via sync flood
1064 */
1065 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
1066 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
1067 if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
1068 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
1069 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1070 amdgpu_ras_reset_gpu(adev);
1071 }
1072 return AMDGPU_RAS_SUCCESS;
1073 }
1074
1075 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
1076 struct amdgpu_irq_src *source,
1077 struct amdgpu_iv_entry *entry)
1078 {
1079 struct ras_common_if *ras_if = adev->gfx.ras_if;
1080 struct ras_dispatch_if ih_data = {
1081 .entry = entry,
1082 };
1083
1084 if (!ras_if)
1085 return 0;
1086
1087 ih_data.head = *ras_if;
1088
1089 dev_err(adev->dev, "CP ECC ERROR IRQ\n");
1090 amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1091 return 0;
1092 }
1093
1094 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
1095 void *ras_error_status,
1096 void (*func)(struct amdgpu_device *adev, void *ras_error_status,
1097 int xcc_id))
1098 {
1099 int i;
1100 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
1101 uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
1102 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
1103
1104 if (err_data) {
1105 err_data->ue_count = 0;
1106 err_data->ce_count = 0;
1107 }
1108
1109 for_each_inst(i, xcc_mask)
1110 func(adev, ras_error_status, i);
1111 }
1112
1113 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
1114 {
1115 signed long r, cnt = 0;
1116 unsigned long flags;
1117 uint32_t seq, reg_val_offs = 0, value = 0;
1118 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1119 struct amdgpu_ring *ring = &kiq->ring;
1120
1121 if (amdgpu_device_skip_hw_access(adev))
1122 return 0;
1123
1124 if (adev->mes.ring[0].sched.ready)
1125 return amdgpu_mes_rreg(adev, reg, xcc_id);
1126
1127 BUG_ON(!ring->funcs->emit_rreg);
1128
1129 spin_lock_irqsave(&kiq->ring_lock, flags);
1130 if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
1131 pr_err("critical bug! too many kiq readers\n");
1132 goto failed_unlock;
1133 }
1134 r = amdgpu_ring_alloc(ring, 32);
1135 if (r)
1136 goto failed_unlock;
1137
1138 amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
1139 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1140 if (r)
1141 goto failed_undo;
1142
1143 amdgpu_ring_commit(ring);
1144 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1145
1146 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1147
1148 /* Don't wait any longer in the GPU reset case, because doing so may
1149 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1150 * is triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
1151 * never return if we keep waiting in virt_kiq_rreg, which causes
1152 * gpu_recover() to hang there.
1153 *
1154 * Also don't wait any longer in IRQ context.
1155 */
1156 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1157 goto failed_kiq_read;
1158
1159 might_sleep();
1160 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1161 if (amdgpu_in_reset(adev))
1162 goto failed_kiq_read;
1163
1164 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1165 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1166 }
1167
1168 if (cnt > MAX_KIQ_REG_TRY)
1169 goto failed_kiq_read;
1170
1171 mb();
1172 value = adev->wb.wb[reg_val_offs];
1173 amdgpu_device_wb_free(adev, reg_val_offs);
1174 return value;
1175
1176 failed_undo:
1177 amdgpu_ring_undo(ring);
1178 failed_unlock:
1179 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1180 failed_kiq_read:
1181 if (reg_val_offs)
1182 amdgpu_device_wb_free(adev, reg_val_offs);
1183 dev_err(adev->dev, "failed to read reg:%x\n", reg);
1184 return ~0;
1185 }
1186
1187 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
1188 {
1189 signed long r, cnt = 0;
1190 unsigned long flags;
1191 uint32_t seq;
1192 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1193 struct amdgpu_ring *ring = &kiq->ring;
1194
1195 BUG_ON(!ring->funcs->emit_wreg);
1196
1197 if (amdgpu_device_skip_hw_access(adev))
1198 return;
1199
1200 if (adev->mes.ring[0].sched.ready) {
1201 amdgpu_mes_wreg(adev, reg, v, xcc_id);
1202 return;
1203 }
1204
1205 spin_lock_irqsave(&kiq->ring_lock, flags);
1206 r = amdgpu_ring_alloc(ring, 32);
1207 if (r)
1208 goto failed_unlock;
1209
1210 amdgpu_ring_emit_wreg(ring, reg, v);
1211 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1212 if (r)
1213 goto failed_undo;
1214
1215 amdgpu_ring_commit(ring);
1216 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1217
1218 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1219
1220 /* Don't wait any longer in the GPU reset case, because doing so may
1221 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1222 * is triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
1223 * never return if we keep waiting in virt_kiq_rreg, which causes
1224 * gpu_recover() to hang there.
1225 *
1226 * Also don't wait any longer in IRQ context.
1227 */
1228 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1229 goto failed_kiq_write;
1230
1231 might_sleep();
1232 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1233 if (amdgpu_in_reset(adev))
1234 goto failed_kiq_write;
1235
1236 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1237 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1238 }
1239
1240 if (cnt > MAX_KIQ_REG_TRY)
1241 goto failed_kiq_write;
1242
1243 return;
1244
1245 failed_undo:
1246 amdgpu_ring_undo(ring);
1247 failed_unlock:
1248 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1249 failed_kiq_write:
1250 dev_err(adev->dev, "failed to write reg:%x\n", reg);
1251 }
1252
1253 void amdgpu_gfx_get_hdp_flush_mask(struct amdgpu_ring *ring,
1254 uint32_t *hdp_flush_mask, uint32_t *reg_mem_engine)
1255 {
1256
1257 if (!ring || !hdp_flush_mask || !reg_mem_engine) {
1258 DRM_INFO("%s:invalid params\n", __func__);
1259 return;
1260 }
1261
1262 const struct nbio_hdp_flush_reg *nbio_hf_reg = ring->adev->nbio.hdp_flush_reg;
1263
1264 switch (ring->funcs->type) {
1265 case AMDGPU_RING_TYPE_GFX:
1266 *hdp_flush_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
1267 *reg_mem_engine = 1; /* pfp */
1268 break;
1269 case AMDGPU_RING_TYPE_COMPUTE:
1270 *hdp_flush_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
1271 *reg_mem_engine = 0;
1272 break;
1273 case AMDGPU_RING_TYPE_MES:
1274 *hdp_flush_mask = nbio_hf_reg->ref_and_mask_cp8;
1275 *reg_mem_engine = 0;
1276 break;
1277 case AMDGPU_RING_TYPE_KIQ:
1278 *hdp_flush_mask = nbio_hf_reg->ref_and_mask_cp9;
1279 *reg_mem_engine = 0;
1280 break;
1281 default:
1282 DRM_ERROR("%s:unsupported ring type %d\n", __func__, ring->funcs->type);
1283 return;
1284 }
1285 }
1286
1287 int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
1288 {
1289 signed long r, cnt = 0;
1290 unsigned long flags;
1291 uint32_t seq;
1292 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
1293 struct amdgpu_ring *ring = &kiq->ring;
1294
1295 if (amdgpu_device_skip_hw_access(adev))
1296 return 0;
1297
1298 if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
1299 return amdgpu_mes_hdp_flush(adev);
1300
1301 if (!ring->funcs->emit_hdp_flush) {
1302 return -EOPNOTSUPP;
1303 }
1304
1305 spin_lock_irqsave(&kiq->ring_lock, flags);
1306 r = amdgpu_ring_alloc(ring, 32);
1307 if (r)
1308 goto failed_unlock;
1309
1310 amdgpu_ring_emit_hdp_flush(ring);
1311 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1312 if (r)
1313 goto failed_undo;
1314
1315 amdgpu_ring_commit(ring);
1316 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1317
1318 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1319
1320 /* Don't wait any longer in the GPU reset case, because doing so may
1321 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1322 * is triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
1323 * never return if we keep waiting in virt_kiq_rreg, which causes
1324 * gpu_recover() to hang there.
1325 *
1326 * Also don't wait any longer in IRQ context.
1327 */
1328 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1329 goto failed_kiq_hdp_flush;
1330
1331 might_sleep();
1332 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1333 if (amdgpu_in_reset(adev))
1334 goto failed_kiq_hdp_flush;
1335
1336 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1337 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1338 }
1339
1340 if (cnt > MAX_KIQ_REG_TRY) {
1341 dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n");
1342 return -ETIMEDOUT;
1343 }
1344
1345 return 0;
1346
1347 failed_undo:
1348 amdgpu_ring_undo(ring);
1349 failed_unlock:
1350 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1351 failed_kiq_hdp_flush:
1352 if (!amdgpu_in_reset(adev))
1353 dev_err(adev->dev, "failed to flush HDP via KIQ\n");
1354 return r < 0 ? r : -EIO;
1355 }
1356
1357 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
1358 {
1359 if (amdgpu_num_kcq == -1) {
1360 return 8;
1361 } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1362 dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1363 return 8;
1364 }
1365 return amdgpu_num_kcq;
1366 }
1367
1368 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
1369 uint32_t ucode_id)
1370 {
1371 const struct gfx_firmware_header_v1_0 *cp_hdr;
1372 const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
1373 struct amdgpu_firmware_info *info = NULL;
1374 const struct firmware *ucode_fw;
1375 unsigned int fw_size;
1376
1377 switch (ucode_id) {
1378 case AMDGPU_UCODE_ID_CP_PFP:
1379 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1380 adev->gfx.pfp_fw->data;
1381 adev->gfx.pfp_fw_version =
1382 le32_to_cpu(cp_hdr->header.ucode_version);
1383 adev->gfx.pfp_feature_version =
1384 le32_to_cpu(cp_hdr->ucode_feature_version);
1385 ucode_fw = adev->gfx.pfp_fw;
1386 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1387 break;
1388 case AMDGPU_UCODE_ID_CP_RS64_PFP:
1389 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1390 adev->gfx.pfp_fw->data;
1391 adev->gfx.pfp_fw_version =
1392 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1393 adev->gfx.pfp_feature_version =
1394 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1395 ucode_fw = adev->gfx.pfp_fw;
1396 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1397 break;
1398 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
1399 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1400 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1401 adev->gfx.pfp_fw->data;
1402 ucode_fw = adev->gfx.pfp_fw;
1403 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1404 break;
1405 case AMDGPU_UCODE_ID_CP_ME:
1406 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1407 adev->gfx.me_fw->data;
1408 adev->gfx.me_fw_version =
1409 le32_to_cpu(cp_hdr->header.ucode_version);
1410 adev->gfx.me_feature_version =
1411 le32_to_cpu(cp_hdr->ucode_feature_version);
1412 ucode_fw = adev->gfx.me_fw;
1413 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1414 break;
1415 case AMDGPU_UCODE_ID_CP_RS64_ME:
1416 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1417 adev->gfx.me_fw->data;
1418 adev->gfx.me_fw_version =
1419 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1420 adev->gfx.me_feature_version =
1421 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1422 ucode_fw = adev->gfx.me_fw;
1423 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1424 break;
1425 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1426 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1427 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1428 adev->gfx.me_fw->data;
1429 ucode_fw = adev->gfx.me_fw;
1430 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1431 break;
1432 case AMDGPU_UCODE_ID_CP_CE:
1433 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1434 adev->gfx.ce_fw->data;
1435 adev->gfx.ce_fw_version =
1436 le32_to_cpu(cp_hdr->header.ucode_version);
1437 adev->gfx.ce_feature_version =
1438 le32_to_cpu(cp_hdr->ucode_feature_version);
1439 ucode_fw = adev->gfx.ce_fw;
1440 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1441 break;
1442 case AMDGPU_UCODE_ID_CP_MEC1:
1443 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1444 adev->gfx.mec_fw->data;
1445 adev->gfx.mec_fw_version =
1446 le32_to_cpu(cp_hdr->header.ucode_version);
1447 adev->gfx.mec_feature_version =
1448 le32_to_cpu(cp_hdr->ucode_feature_version);
1449 ucode_fw = adev->gfx.mec_fw;
1450 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1451 le32_to_cpu(cp_hdr->jt_size) * 4;
1452 break;
1453 case AMDGPU_UCODE_ID_CP_MEC1_JT:
1454 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1455 adev->gfx.mec_fw->data;
1456 ucode_fw = adev->gfx.mec_fw;
1457 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1458 break;
1459 case AMDGPU_UCODE_ID_CP_MEC2:
1460 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1461 adev->gfx.mec2_fw->data;
1462 adev->gfx.mec2_fw_version =
1463 le32_to_cpu(cp_hdr->header.ucode_version);
1464 adev->gfx.mec2_feature_version =
1465 le32_to_cpu(cp_hdr->ucode_feature_version);
1466 ucode_fw = adev->gfx.mec2_fw;
1467 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1468 le32_to_cpu(cp_hdr->jt_size) * 4;
1469 break;
1470 case AMDGPU_UCODE_ID_CP_MEC2_JT:
1471 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1472 adev->gfx.mec2_fw->data;
1473 ucode_fw = adev->gfx.mec2_fw;
1474 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1475 break;
1476 case AMDGPU_UCODE_ID_CP_RS64_MEC:
1477 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1478 adev->gfx.mec_fw->data;
1479 adev->gfx.mec_fw_version =
1480 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1481 adev->gfx.mec_feature_version =
1482 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1483 ucode_fw = adev->gfx.mec_fw;
1484 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1485 break;
1486 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1487 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1488 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1489 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1490 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1491 adev->gfx.mec_fw->data;
1492 ucode_fw = adev->gfx.mec_fw;
1493 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1494 break;
1495 default:
1496 dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
1497 return;
1498 }
1499
1500 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1501 info = &adev->firmware.ucode[ucode_id];
1502 info->ucode_id = ucode_id;
1503 info->fw = ucode_fw;
1504 adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
1505 }
1506 }
1507
1508 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
1509 {
1510 return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
1511 adev->gfx.num_xcc_per_xcp : 1));
1512 }
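
/*
 * Worked example: with adev->gfx.num_xcc_per_xcp == 2, XCC instances 0, 2,
 * 4, ... report as the master XCC of their partition and the odd instances
 * as slaves. With num_xcc_per_xcp == 0 the divisor falls back to 1 and every
 * XCC is treated as a master.
 */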
1513
1514 static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
1515 struct device_attribute *addr,
1516 char *buf)
1517 {
1518 struct drm_device *ddev = dev_get_drvdata(dev);
1519 struct amdgpu_device *adev = drm_to_adev(ddev);
1520 int mode;
1521
1522 /* Only minimal precaution taken to reject requests while in reset.*/
1523 if (amdgpu_in_reset(adev))
1524 return -EPERM;
1525
1526 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1527 AMDGPU_XCP_FL_NONE);
1528
1529 return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
1530 }
1531
1532 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
1533 struct device_attribute *addr,
1534 const char *buf, size_t count)
1535 {
1536 struct drm_device *ddev = dev_get_drvdata(dev);
1537 struct amdgpu_device *adev = drm_to_adev(ddev);
1538 enum amdgpu_gfx_partition mode;
1539 int ret = 0, num_xcc;
1540
1541 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1542 if (num_xcc % 2 != 0)
1543 return -EINVAL;
1544
1545 if (!strncasecmp("SPX", buf, strlen("SPX"))) {
1546 mode = AMDGPU_SPX_PARTITION_MODE;
1547 } else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
1548 /*
1549 * DPX mode needs the number of AIDs to be a multiple of 2.
1550 * Each AID connects 2 XCCs.
1551 */
1552 if (num_xcc%4)
1553 return -EINVAL;
1554 mode = AMDGPU_DPX_PARTITION_MODE;
1555 } else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
1556 if (num_xcc != 6)
1557 return -EINVAL;
1558 mode = AMDGPU_TPX_PARTITION_MODE;
1559 } else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
1560 if (num_xcc != 8)
1561 return -EINVAL;
1562 mode = AMDGPU_QPX_PARTITION_MODE;
1563 } else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
1564 mode = AMDGPU_CPX_PARTITION_MODE;
1565 } else {
1566 return -EINVAL;
1567 }
1568
1569 /* Don't allow a switch while under reset */
1570 if (!down_read_trylock(&adev->reset_domain->sem))
1571 return -EPERM;
1572
1573 ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
1574
1575 up_read(&adev->reset_domain->sem);
1576
1577 if (ret)
1578 return ret;
1579
1580 return count;
1581 }
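
/*
 * Illustrative sysfs usage (the node path and attribute names are assumptions
 * and may differ by kernel/system): switching an 8-XCC device to DPX could
 * look like
 *
 *   echo DPX > /sys/class/drm/card0/device/current_compute_partition
 *   cat /sys/class/drm/card0/device/available_compute_partition
 *
 * The store handler above rejects modes whose XCC-count requirements are not
 * met (e.g. TPX needs exactly 6 XCCs) and refuses to switch while a reset is
 * in progress.
 */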
1582
1583 static ssize_t compute_partition_mem_alloc_mode_show(struct device *dev,
1584 struct device_attribute *addr,
1585 char *buf)
1586 {
1587 struct drm_device *ddev = dev_get_drvdata(dev);
1588 struct amdgpu_device *adev = drm_to_adev(ddev);
1589 int mode = adev->xcp_mgr->mem_alloc_mode;
1590
1591 return sysfs_emit(buf, "%s\n",
1592 amdgpu_gfx_compute_mem_alloc_mode_desc(mode));
1593 }
1594
1595
1596 static ssize_t compute_partition_mem_alloc_mode_store(struct device *dev,
1597 struct device_attribute *addr,
1598 const char *buf, size_t count)
1599 {
1600 struct drm_device *ddev = dev_get_drvdata(dev);
1601 struct amdgpu_device *adev = drm_to_adev(ddev);
1602
1603 if (!strncasecmp("CAPPING", buf, strlen("CAPPING")))
1604 adev->xcp_mgr->mem_alloc_mode = AMDGPU_PARTITION_MEM_CAPPING_EVEN;
1605 else if (!strncasecmp("ALL", buf, strlen("ALL")))
1606 adev->xcp_mgr->mem_alloc_mode = AMDGPU_PARTITION_MEM_ALLOC_ALL;
1607 else
1608 return -EINVAL;
1609
1610 return count;
1611 }
1612
1613 static const char *xcp_desc[] = {
1614 [AMDGPU_SPX_PARTITION_MODE] = "SPX",
1615 [AMDGPU_DPX_PARTITION_MODE] = "DPX",
1616 [AMDGPU_TPX_PARTITION_MODE] = "TPX",
1617 [AMDGPU_QPX_PARTITION_MODE] = "QPX",
1618 [AMDGPU_CPX_PARTITION_MODE] = "CPX",
1619 };
1620
1621 static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
1622 struct device_attribute *addr,
1623 char *buf)
1624 {
1625 struct drm_device *ddev = dev_get_drvdata(dev);
1626 struct amdgpu_device *adev = drm_to_adev(ddev);
1627 struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1628 int size = 0, mode;
1629 char *sep = "";
1630
1631 if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
1632 return sysfs_emit(buf, "Not supported\n");
1633
1634 for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
1635 size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
1636 sep = ", ";
1637 }
1638
1639 size += sysfs_emit_at(buf, size, "\n");
1640
1641 return size;
1642 }
1643
1644 static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
1645 {
1646 struct amdgpu_device *adev = ring->adev;
1647 struct drm_gpu_scheduler *sched = &ring->sched;
1648 struct drm_sched_entity entity;
1649 static atomic_t counter;
1650 struct dma_fence *f;
1651 struct amdgpu_job *job;
1652 struct amdgpu_ib *ib;
1653 void *owner;
1654 int i, r;
1655
1656 /* Initialize the scheduler entity */
1657 r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
1658 &sched, 1, NULL);
1659 if (r) {
1660 dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
1661 goto err;
1662 }
1663
1664 /*
1665 * Use some unique dummy value as the owner to make sure we execute
1666 * the cleaner shader on each submission. The value just needs to change
1667 * for each submission and is otherwise meaningless.
1668 */
1669 owner = (void *)(unsigned long)atomic_inc_return(&counter);
1670
1671 r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
1672 64, 0, &job,
1673 AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
1674 if (r)
1675 goto err;
1676
1677 job->enforce_isolation = true;
1678 /* always run the cleaner shader */
1679 job->run_cleaner_shader = true;
1680
1681 ib = &job->ibs[0];
1682 for (i = 0; i <= ring->funcs->align_mask; ++i)
1683 ib->ptr[i] = ring->funcs->nop;
1684 ib->length_dw = ring->funcs->align_mask + 1;
1685
1686 f = amdgpu_job_submit(job);
1687
1688 r = dma_fence_wait(f, false);
1689 if (r)
1690 goto err;
1691
1692 dma_fence_put(f);
1693
1694 /* Clean up the scheduler entity */
1695 drm_sched_entity_destroy(&entity);
1696 return 0;
1697
1698 err:
1699 return r;
1700 }
1701
1702 static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
1703 {
1704 int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1705 struct amdgpu_ring *ring;
1706 int num_xcc_to_clear;
1707 int i, r, xcc_id;
1708
1709 if (adev->gfx.num_xcc_per_xcp)
1710 num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
1711 else
1712 num_xcc_to_clear = 1;
1713
1714 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1715 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1716 ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1717 if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
1718 r = amdgpu_gfx_run_cleaner_shader_job(ring);
1719 if (r)
1720 return r;
1721 num_xcc_to_clear--;
1722 break;
1723 }
1724 }
1725 }
1726
1727 if (num_xcc_to_clear)
1728 return -ENOENT;
1729
1730 return 0;
1731 }
1732
1733 /**
1734 * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
1735 * @dev: The device structure
1736 * @attr: The device attribute structure
1737 * @buf: The buffer containing the input data
1738 * @count: The size of the input data
1739 *
1740 * Provides the sysfs interface to manually run a cleaner shader, which is
1741 * used to clear the GPU state between different tasks. Writing a value to the
1742 * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
1743 * The value written corresponds to the partition index on multi-partition
1744 * devices. On single-partition devices, the value should be '0'.
1745 *
1746 * The cleaner shader clears the Local Data Store (LDS) and General Purpose
1747 * Registers (GPRs) to ensure data isolation between GPU workloads.
1748 *
1749 * Return: The number of bytes written to the sysfs file.
1750 */
1751 static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
1752 struct device_attribute *attr,
1753 const char *buf,
1754 size_t count)
1755 {
1756 struct drm_device *ddev = dev_get_drvdata(dev);
1757 struct amdgpu_device *adev = drm_to_adev(ddev);
1758 int ret;
1759 long value;
1760
1761 if (amdgpu_in_reset(adev))
1762 return -EPERM;
1763 if (adev->in_suspend && !adev->in_runpm)
1764 return -EPERM;
1765
1766 if (adev->gfx.disable_kq)
1767 return -EPERM;
1768
1769 ret = kstrtol(buf, 0, &value);
1770
1771 if (ret)
1772 return -EINVAL;
1773
1774 if (value < 0)
1775 return -EINVAL;
1776
1777 if (adev->xcp_mgr) {
1778 if (value >= adev->xcp_mgr->num_xcps)
1779 return -EINVAL;
1780 } else {
1781 if (value > 1)
1782 return -EINVAL;
1783 }
1784
1785 ret = pm_runtime_get_sync(ddev->dev);
1786 if (ret < 0) {
1787 pm_runtime_put_autosuspend(ddev->dev);
1788 return ret;
1789 }
1790
1791 ret = amdgpu_gfx_run_cleaner_shader(adev, value);
1792
1793 pm_runtime_put_autosuspend(ddev->dev);
1794
1795 if (ret)
1796 return ret;
1797
1798 return count;
1799 }
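
/*
 * Usage sketch (hypothetical sysfs paths): the value written selects the
 * partition whose compute rings run the cleaner shader.
 *   echo 0 > /sys/class/drm/card0/device/run_cleaner_shader   # partition 0
 *   echo 2 > /sys/class/drm/card0/device/run_cleaner_shader   # partition 2 on a multi-XCP device
 */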
1800
1801 /**
1802 * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
1803 * @dev: The device structure
1804 * @attr: The device attribute structure
1805 * @buf: The buffer to store the output data
1806 *
1807 * Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
1808 * feature for each GPU partition. Reading from the 'enforce_isolation'
1809 * sysfs file returns the isolation settings for all partitions, where '0'
1810 * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy mode,
1811 * and '3' indicates enabled without the cleaner shader.
1812 *
1813 * Return: The number of bytes read from the sysfs file.
1814 */
1815 static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
1816 struct device_attribute *attr,
1817 char *buf)
1818 {
1819 struct drm_device *ddev = dev_get_drvdata(dev);
1820 struct amdgpu_device *adev = drm_to_adev(ddev);
1821 int i;
1822 ssize_t size = 0;
1823
1824 if (adev->xcp_mgr) {
1825 for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
1826 size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
1827 if (i < (adev->xcp_mgr->num_xcps - 1))
1828 size += sysfs_emit_at(buf, size, " ");
1829 }
1830 buf[size++] = '\n';
1831 } else {
1832 size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
1833 }
1834
1835 return size;
1836 }
1837
1838 /**
1839 * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
1840 * @dev: The device structure
1841 * @attr: The device attribute structure
1842 * @buf: The buffer containing the input data
1843 * @count: The size of the input data
1844 *
1845 * This function allows control over the 'enforce_isolation' feature, which
1846 * serializes access to the graphics engine. Writing to the 'enforce_isolation'
1847 * sysfs file sets the isolation mode for each partition: '0' disables isolation,
1848 * '1' enables isolation with the cleaner shader, '2' enables legacy isolation
1849 * without the cleaner shader, and '3' enables process isolation without
1850 * submitting the cleaner shader. The input should specify the setting for all
1851 * partitions.
1852 *
1853 * Return: The number of bytes written to the sysfs file.
1854 */
1855 static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
1856 struct device_attribute *attr,
1857 const char *buf, size_t count)
1858 {
1859 struct drm_device *ddev = dev_get_drvdata(dev);
1860 struct amdgpu_device *adev = drm_to_adev(ddev);
1861 long partition_values[MAX_XCP] = {0};
1862 int ret, i, num_partitions;
1863 const char *input_buf = buf;
1864
1865 for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
1866 ret = sscanf(input_buf, "%ld", &partition_values[i]);
1867 if (ret <= 0)
1868 break;
1869
1870 /* Move the pointer to the next value in the string */
1871 input_buf = strchr(input_buf, ' ');
1872 if (input_buf) {
1873 input_buf++;
1874 } else {
1875 i++;
1876 break;
1877 }
1878 }
1879 num_partitions = i;
1880
1881 if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
1882 return -EINVAL;
1883
1884 if (!adev->xcp_mgr && num_partitions != 1)
1885 return -EINVAL;
1886
1887 for (i = 0; i < num_partitions; i++) {
1888 if (partition_values[i] != 0 &&
1889 partition_values[i] != 1 &&
1890 partition_values[i] != 2 &&
1891 partition_values[i] != 3)
1892 return -EINVAL;
1893 }
1894
1895 mutex_lock(&adev->enforce_isolation_mutex);
1896 for (i = 0; i < num_partitions; i++) {
1897 switch (partition_values[i]) {
1898 case 0:
1899 default:
1900 adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
1901 break;
1902 case 1:
1903 adev->enforce_isolation[i] =
1904 AMDGPU_ENFORCE_ISOLATION_ENABLE;
1905 break;
1906 case 2:
1907 adev->enforce_isolation[i] =
1908 AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
1909 break;
1910 case 3:
1911 adev->enforce_isolation[i] =
1912 AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
1913 break;
1914 }
1915 }
1916 mutex_unlock(&adev->enforce_isolation_mutex);
1917
1918 amdgpu_mes_update_enforce_isolation(adev);
1919
1920 return count;
1921 }
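
/*
 * Usage sketch (hypothetical sysfs paths): one value per partition must be
 * supplied. On a single-partition device:
 *   echo 1 > /sys/class/drm/card0/device/enforce_isolation
 * On a device exposing four partitions:
 *   echo "1 0 2 3" > /sys/class/drm/card0/device/enforce_isolation
 * Reading the file back returns the same space-separated list.
 */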
1922
1923 static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
1924 struct device_attribute *attr,
1925 char *buf)
1926 {
1927 struct drm_device *ddev = dev_get_drvdata(dev);
1928 struct amdgpu_device *adev = drm_to_adev(ddev);
1929
1930 if (!adev)
1931 return -ENODEV;
1932
1933 return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
1934 }
1935
1936 static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
1937 struct device_attribute *attr,
1938 char *buf)
1939 {
1940 struct drm_device *ddev = dev_get_drvdata(dev);
1941 struct amdgpu_device *adev = drm_to_adev(ddev);
1942
1943 if (!adev)
1944 return -ENODEV;
1945
1946 return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
1947 }
1948
1949 static DEVICE_ATTR(run_cleaner_shader, 0200,
1950 NULL, amdgpu_gfx_set_run_cleaner_shader);
1951
1952 static DEVICE_ATTR(enforce_isolation, 0644,
1953 amdgpu_gfx_get_enforce_isolation,
1954 amdgpu_gfx_set_enforce_isolation);
1955
1956 static DEVICE_ATTR(current_compute_partition, 0644,
1957 amdgpu_gfx_get_current_compute_partition,
1958 amdgpu_gfx_set_compute_partition);
1959
1960 static DEVICE_ATTR(available_compute_partition, 0444,
1961 amdgpu_gfx_get_available_compute_partition, NULL);
1962 static DEVICE_ATTR(gfx_reset_mask, 0444,
1963 amdgpu_gfx_get_gfx_reset_mask, NULL);
1964
1965 static DEVICE_ATTR(compute_reset_mask, 0444,
1966 amdgpu_gfx_get_compute_reset_mask, NULL);
1967
1968 static DEVICE_ATTR(compute_partition_mem_alloc_mode, 0644,
1969 compute_partition_mem_alloc_mode_show,
1970 compute_partition_mem_alloc_mode_store);
1971
1972 static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
1973 {
1974 struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1975 bool xcp_switch_supported;
1976 int r;
1977
1978 if (!xcp_mgr)
1979 return 0;
1980
1981 xcp_switch_supported =
1982 (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1983
1984 if (!xcp_switch_supported)
1985 dev_attr_current_compute_partition.attr.mode &=
1986 ~(S_IWUSR | S_IWGRP | S_IWOTH);
1987
1988 r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
1989 if (r)
1990 return r;
1991
1992 r = device_create_file(adev->dev,
1993 &dev_attr_compute_partition_mem_alloc_mode);
1994 if (r)
1995 return r;
1996
1997 if (xcp_switch_supported)
1998 r = device_create_file(adev->dev,
1999 &dev_attr_available_compute_partition);
2000
2001 return r;
2002 }
2003
2004 static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
2005 {
2006 struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
2007 bool xcp_switch_supported;
2008
2009 if (!xcp_mgr)
2010 return;
2011
2012 xcp_switch_supported =
2013 (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
2014 device_remove_file(adev->dev, &dev_attr_current_compute_partition);
2015
2016 device_remove_file(adev->dev, &dev_attr_compute_partition_mem_alloc_mode);
2017
2018 if (xcp_switch_supported)
2019 device_remove_file(adev->dev,
2020 &dev_attr_available_compute_partition);
2021 }
2022
2023 static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
2024 {
2025 int r;
2026
2027 r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
2028 if (r)
2029 return r;
2030 if (adev->gfx.enable_cleaner_shader)
2031 r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
2032
2033 return r;
2034 }
2035
2036 static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
2037 {
2038 device_remove_file(adev->dev, &dev_attr_enforce_isolation);
2039 if (adev->gfx.enable_cleaner_shader)
2040 device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
2041 }
2042
2043 static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
2044 {
2045 int r = 0;
2046
2047 if (!amdgpu_gpu_recovery)
2048 return r;
2049
2050 if (adev->gfx.num_gfx_rings) {
2051 r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
2052 if (r)
2053 return r;
2054 }
2055
2056 if (adev->gfx.num_compute_rings) {
2057 r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
2058 if (r)
2059 return r;
2060 }
2061
2062 return r;
2063 }
2064
2065 static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
2066 {
2067 if (!amdgpu_gpu_recovery)
2068 return;
2069
2070 if (adev->gfx.num_gfx_rings)
2071 device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
2072
2073 if (adev->gfx.num_compute_rings)
2074 device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
2075 }
2076
2077 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
2078 {
2079 int r;
2080
2081 r = amdgpu_gfx_sysfs_xcp_init(adev);
2082 if (r) {
2083 dev_err(adev->dev, "failed to create xcp sysfs files\n");
2084 return r;
2085 }
2086
2087 r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
2088 if (r)
2089 dev_err(adev->dev, "failed to create isolation sysfs files\n");
2090
2091 r = amdgpu_gfx_sysfs_reset_mask_init(adev);
2092 if (r)
2093 dev_err(adev->dev, "failed to create reset mask sysfs files\n");
2094
2095 return r;
2096 }
2097
2098 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
2099 {
2100 if (adev->dev->kobj.sd) {
2101 amdgpu_gfx_sysfs_xcp_fini(adev);
2102 amdgpu_gfx_sysfs_isolation_shader_fini(adev);
2103 amdgpu_gfx_sysfs_reset_mask_fini(adev);
2104 }
2105 }
2106
2107 int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
2108 unsigned int cleaner_shader_size)
2109 {
2110 if (!adev->gfx.enable_cleaner_shader)
2111 return -EOPNOTSUPP;
2112
2113 return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
2114 AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
2115 &adev->gfx.cleaner_shader_obj,
2116 &adev->gfx.cleaner_shader_gpu_addr,
2117 (void **)&adev->gfx.cleaner_shader_cpu_ptr);
2118 }
2119
2120 void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
2121 {
2122 if (!adev->gfx.enable_cleaner_shader)
2123 return;
2124
2125 amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
2126 &adev->gfx.cleaner_shader_gpu_addr,
2127 (void **)&adev->gfx.cleaner_shader_cpu_ptr);
2128 }
2129
2130 void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
2131 unsigned int cleaner_shader_size,
2132 const void *cleaner_shader_ptr)
2133 {
2134 if (!adev->gfx.enable_cleaner_shader)
2135 return;
2136
2137 if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
2138 memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
2139 cleaner_shader_size);
2140 }
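
/*
 * Typical lifecycle (sketch, based on how the three helpers above fit
 * together): an IP block reserves the buffer object with
 * amdgpu_gfx_cleaner_shader_sw_init() during sw_init, uploads its shader
 * binary with amdgpu_gfx_cleaner_shader_init() once the CPU pointer is
 * valid, and releases the object with amdgpu_gfx_cleaner_shader_sw_fini()
 * during sw_fini.
 */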
2141
2142 /**
2143 * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
2144 * @adev: amdgpu_device pointer
2145 * @idx: Index of the scheduler to control
2146 * @enable: Whether to enable or disable the KFD scheduler
2147 *
2148 * This function is used to control the KFD (Kernel Fusion Driver) scheduler
2149 * from the KGD. It is part of the cleaner shader feature. This function plays
2150 * a key role in enforcing process isolation on the GPU.
2151 *
2152 * The function uses a reference count mechanism (userq_sch_req_count) to keep
2153 * track of the number of requests to enable the KFD scheduler. When a request
2154 * to enable the KFD scheduler is made, the reference count is decremented.
2155 * When the reference count reaches zero, a delayed work is scheduled to
2156 * enforce isolation after the remaining time slice (at most GFX_SLICE_PERIOD_MS) has elapsed.
2157 *
2158 * When a request to disable the KFD scheduler is made, the function first
2159 * checks if the reference count is zero. If it is, it cancels the delayed work
2160 * for enforcing isolation and checks if the KFD scheduler is active. If the
2161 * KFD scheduler is active, it sends a request to stop the KFD scheduler and
2162 * sets the KFD scheduler state to inactive. Then, it increments the reference
2163 * count.
2164 *
2165 * The function is synchronized using the userq_sch_mutex to ensure that the KFD
2166 * scheduler state and reference count are updated atomically.
2167 *
2168 * Note: If the reference count is already zero when a request to enable the
2169 * KFD scheduler is made, it means there's an imbalance bug somewhere. The
2170 * function triggers a warning in this case.
2171 */
2172 static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
2173 bool enable)
2174 {
2175 mutex_lock(&adev->gfx.userq_sch_mutex);
2176
2177 if (enable) {
2178 /* If the count is already 0, it means there's an imbalance bug somewhere.
2179 * Note that the bug may be in a different caller than the one which triggers the
2180 * WARN_ON_ONCE.
2181 */
2182 if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
2183 dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
2184 goto unlock;
2185 }
2186
2187 adev->gfx.userq_sch_req_count[idx]--;
2188
2189 if (adev->gfx.userq_sch_req_count[idx] == 0 &&
2190 adev->gfx.userq_sch_inactive[idx]) {
2191 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
2192 msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
2193 }
2194 } else {
2195 if (adev->gfx.userq_sch_req_count[idx] == 0) {
2196 cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
2197 if (!adev->gfx.userq_sch_inactive[idx]) {
2198 amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
2199 if (adev->kfd.init_complete)
2200 amdgpu_amdkfd_stop_sched(adev, idx);
2201 adev->gfx.userq_sch_inactive[idx] = true;
2202 }
2203 }
2204
2205 adev->gfx.userq_sch_req_count[idx]++;
2206 }
2207
2208 unlock:
2209 mutex_unlock(&adev->gfx.userq_sch_mutex);
2210 }
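
/*
 * Illustrative sequence (sketch): two rings entering begin_use both request
 * a disable, so userq_sch_req_count goes 0 -> 1 -> 2 and the user queue
 * schedulers are stopped only once, on the first request; the matching
 * end_use calls drop the count 2 -> 1 -> 0, and only at zero is the delayed
 * isolation work queued to hand the GPU back to user queues.
 */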
2211
2212 /**
2213 * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
2214 *
2215 * @work: work_struct.
2216 *
2217 * This function is the work handler for enforcing shader isolation on AMD GPUs.
2218 * It counts the number of emitted fences for each GFX and compute ring belonging
2219 * to the partition. If any fences are still pending, it re-schedules itself to run
2220 * again after a short delay. If no fences are pending, it signals the Kernel Fusion
2221 * Driver (KFD) to resume the runqueue. The function is synchronized using the
2222 * `enforce_isolation_mutex`.
2223 */
2224 void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
2225 {
2226 struct amdgpu_isolation_work *isolation_work =
2227 container_of(work, struct amdgpu_isolation_work, work.work);
2228 struct amdgpu_device *adev = isolation_work->adev;
2229 u32 i, idx, fences = 0;
2230
2231 if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
2232 idx = 0;
2233 else
2234 idx = isolation_work->xcp_id;
2235
2236 if (idx >= MAX_XCP)
2237 return;
2238
2239 mutex_lock(&adev->enforce_isolation_mutex);
2240 for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
2241 if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
2242 fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
2243 }
2244 for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
2245 if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
2246 fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
2247 }
2248 if (fences) {
2249 /* we've already had our timeslice, so let's wrap this up */
2250 schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
2251 msecs_to_jiffies(1));
2252 } else {
2253 /* Tell KFD to resume the runqueue */
2254 WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
2255 WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
2256
2257 amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
2258 if (adev->kfd.init_complete)
2259 amdgpu_amdkfd_start_sched(adev, idx);
2260 adev->gfx.userq_sch_inactive[idx] = false;
2261 }
2262 mutex_unlock(&adev->enforce_isolation_mutex);
2263 }
2264
2265 /**
2266 * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
2267 * @adev: amdgpu_device pointer
2268 * @idx: Index of the GPU partition
2269 *
2270 * When kernel submissions come in, the jobs are given a time slice and once
2271 * that time slice is up, if there are KFD user queues active, kernel
2272 * submissions are blocked until KFD has had its time slice. Once the KFD time
2273 * slice is up, KFD user queues are preempted and kernel submissions are
2274 * unblocked and allowed to run again.
2275 */
2276 static void
2277 amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
2278 u32 idx)
2279 {
2280 unsigned long cjiffies;
2281 bool wait = false;
2282
2283 mutex_lock(&adev->enforce_isolation_mutex);
2284 if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2285 /* set the initial values if nothing is set */
2286 if (!adev->gfx.enforce_isolation_jiffies[idx]) {
2287 adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2288 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2289 }
2290 /* Make sure KFD gets a chance to run */
2291 if (amdgpu_amdkfd_compute_active(adev, idx)) {
2292 cjiffies = jiffies;
2293 if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
2294 cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
2295 if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) {
2296 /* if our time is up, let KGD work drain before scheduling more */
2297 wait = true;
2298 /* reset the timer period */
2299 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2300 } else {
2301 /* set the timer period to what's left in our time slice */
2302 adev->gfx.enforce_isolation_time[idx] =
2303 GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
2304 }
2305 } else {
2306 /* if jiffies wrap around we will just wait a little longer */
2307 adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2308 }
2309 } else {
2310 /* if there is no KFD work, then set the full slice period */
2311 adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2312 adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2313 }
2314 }
2315 mutex_unlock(&adev->enforce_isolation_mutex);
2316
2317 if (wait)
2318 msleep(GFX_SLICE_PERIOD_MS);
2319 }
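
/*
 * Worked example (sketch): if KFD work is active and 40 ms of the current
 * slice have already elapsed, enforce_isolation_time is set to
 * GFX_SLICE_PERIOD_MS - 40 so the delayed isolation work fires when the
 * kernel-queue slice actually expires; once the full period has elapsed,
 * the caller sleeps for a whole GFX_SLICE_PERIOD_MS to let KFD run.
 */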
2320
2321 /**
2322 * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
2323 * @ring: Pointer to the amdgpu_ring structure
2324 *
2325 * Ring begin_use helper implementation for gfx which serializes access to the
2326 * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2327 * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2328 * each get a time slice when both are active.
2329 */
2330 void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
2331 {
2332 struct amdgpu_device *adev = ring->adev;
2333 u32 idx;
2334 bool sched_work = false;
2335
2336 if (!adev->gfx.enable_cleaner_shader)
2337 return;
2338
2339 if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2340 idx = 0;
2341 else
2342 idx = ring->xcp_id;
2343
2344 if (idx >= MAX_XCP)
2345 return;
2346
2347 /* Don't submit more work until KFD has had some time */
2348 amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
2349
2350 mutex_lock(&adev->enforce_isolation_mutex);
2351 if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2352 if (adev->kfd.init_complete)
2353 sched_work = true;
2354 }
2355 mutex_unlock(&adev->enforce_isolation_mutex);
2356
2357 if (sched_work)
2358 amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
2359 }
2360
2361 /**
2362 * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
2363 * @ring: Pointer to the amdgpu_ring structure
2364 *
2365 * Ring end_use helper implementation for gfx which serializes access to the
2366 * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2367 * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2368 * each get a time slice when both are active.
2369 */
2370 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
2371 {
2372 struct amdgpu_device *adev = ring->adev;
2373 u32 idx;
2374 bool sched_work = false;
2375
2376 if (!adev->gfx.enable_cleaner_shader)
2377 return;
2378
2379 if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2380 idx = 0;
2381 else
2382 idx = ring->xcp_id;
2383
2384 if (idx >= MAX_XCP)
2385 return;
2386
2387 mutex_lock(&adev->enforce_isolation_mutex);
2388 if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2389 if (adev->kfd.init_complete)
2390 sched_work = true;
2391 }
2392 mutex_unlock(&adev->enforce_isolation_mutex);
2393
2394 if (sched_work)
2395 amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
2396 }
2397
2398 void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work)
2399 {
2400 struct amdgpu_device *adev =
2401 container_of(work, struct amdgpu_device, gfx.idle_work.work);
2402 enum PP_SMC_POWER_PROFILE profile;
2403 u32 i, fences = 0;
2404 int r;
2405
2406 if (adev->gfx.num_gfx_rings)
2407 profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
2408 else
2409 profile = PP_SMC_POWER_PROFILE_COMPUTE;
2410
2411 for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i)
2412 fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
2413 for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i)
2414 fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
2415 if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) {
2416 mutex_lock(&adev->gfx.workload_profile_mutex);
2417 if (adev->gfx.workload_profile_active) {
2418 r = amdgpu_dpm_switch_power_profile(adev, profile, false);
2419 if (r)
2420 dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
2421 profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
2422 "fullscreen 3D" : "compute");
2423 adev->gfx.workload_profile_active = false;
2424 }
2425 mutex_unlock(&adev->gfx.workload_profile_mutex);
2426 } else {
2427 schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
2428 }
2429 }
2430
2431 void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
2432 {
2433 struct amdgpu_device *adev = ring->adev;
2434 enum PP_SMC_POWER_PROFILE profile;
2435 int r;
2436
2437 if (amdgpu_dpm_is_overdrive_enabled(adev))
2438 return;
2439
2440 if (adev->gfx.num_gfx_rings)
2441 profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
2442 else
2443 profile = PP_SMC_POWER_PROFILE_COMPUTE;
2444
2445 atomic_inc(&adev->gfx.total_submission_cnt);
2446
2447 cancel_delayed_work_sync(&adev->gfx.idle_work);
2448
2449 /* We can safely return early here because we've cancelled
2450 * the delayed work so there is no one else to set it to false
2451 * and we don't care if someone else sets it to true.
2452 */
2453 if (adev->gfx.workload_profile_active)
2454 return;
2455
2456 mutex_lock(&adev->gfx.workload_profile_mutex);
2457 if (!adev->gfx.workload_profile_active) {
2458 r = amdgpu_dpm_switch_power_profile(adev, profile, true);
2459 if (r)
2460 dev_warn(adev->dev, "(%d) failed to enable %s power profile mode\n", r,
2461 profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
2462 "fullscreen 3D" : "compute");
2463 adev->gfx.workload_profile_active = true;
2464 }
2465 mutex_unlock(&adev->gfx.workload_profile_mutex);
2466 }
2467
2468 void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
2469 {
2470 struct amdgpu_device *adev = ring->adev;
2471
2472 if (amdgpu_dpm_is_overdrive_enabled(adev))
2473 return;
2474
2475 atomic_dec(&ring->adev->gfx.total_submission_cnt);
2476
2477 schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
2478 }
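
/*
 * These two helpers are intended to be called from an IP block's ring
 * begin_use/end_use callbacks (sketch of the wiring; member names are from
 * struct amdgpu_ring_funcs, the <ip> prefix is a placeholder):
 *   .begin_use = <ip>_ring_begin_use,  // calls amdgpu_gfx_profile_ring_begin_use()
 *   .end_use   = <ip>_ring_end_use,    // calls amdgpu_gfx_profile_ring_end_use()
 */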
2479
2480 /**
2481 * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
2482 *
2483 * @buffer: This is an output variable that gets the PACKET3 preamble setup.
2484 *
2485 * Return:
2486 * the next free index into @buffer (the number of dwords written).
2487 */
2488 u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
2489 {
2490 u32 count = 0;
2491
2492 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2493 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2494
2495 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2496 buffer[count++] = cpu_to_le32(0x80000000);
2497 buffer[count++] = cpu_to_le32(0x80000000);
2498
2499 return count;
2500 }
2501
2502 /**
2503 * amdgpu_gfx_csb_data_parser - Parse the clear-state (CS) data
2504 *
2505 * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
2506 * @buffer: Output buffer that receives the SET_CONTEXT_REG packets.
2507 * @count: Index in @buffer at which to start writing.
2508 *
2509 * Return:
2510 * the next free index into @buffer.
2511 */
2512 u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
2513 {
2514 const struct cs_section_def *sect = NULL;
2515 const struct cs_extent_def *ext = NULL;
2516 u32 i;
2517
2518 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2519 for (ext = sect->section; ext->extent != NULL; ++ext) {
2520 if (sect->id == SECT_CONTEXT) {
2521 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2522 buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2523
2524 for (i = 0; i < ext->reg_count; i++)
2525 buffer[count++] = cpu_to_le32(ext->extent[i]);
2526 }
2527 }
2528 }
2529
2530 return count;
2531 }
2532
2533 /**
2534 * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
2535 *
2536 * @buffer: This is an output variable that gets the PACKET3 preamble end.
2537 * @count: Index in @buffer at which to write the preamble end.
2538 */
2539 void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
2540 {
2541 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2542 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
2543
2544 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
2545 buffer[count++] = cpu_to_le32(0);
2546 }
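
/*
 * The three CSB helpers above are meant to be composed when building a
 * clear-state buffer (sketch, assuming a gfx IP's get_csb_buffer callback
 * with a sufficiently large @buffer):
 *   count = amdgpu_gfx_csb_preamble_start(buffer);
 *   count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
 *   amdgpu_gfx_csb_preamble_end(buffer, count);
 */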
2547
2548 /*
2549 * debugfs interface to enable/disable gfx job submission to specific rings.
2550 */
2551 #if defined(CONFIG_DEBUG_FS)
2552 static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
2553 {
2554 struct amdgpu_device *adev = (struct amdgpu_device *)data;
2555 u32 i;
2556 u64 mask = 0;
2557 struct amdgpu_ring *ring;
2558
2559 if (!adev)
2560 return -ENODEV;
2561
2562 mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
2563 if ((val & mask) == 0)
2564 return -EINVAL;
2565
2566 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2567 ring = &adev->gfx.gfx_ring[i];
2568 if (val & (1 << i))
2569 ring->sched.ready = true;
2570 else
2571 ring->sched.ready = false;
2572 }
2573 /* publish the sched.ready flag updates so they take effect immediately across SMP */
2574 smp_rmb();
2575 return 0;
2576 }
2577
2578 static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
2579 {
2580 struct amdgpu_device *adev = (struct amdgpu_device *)data;
2581 u32 i;
2582 u64 mask = 0;
2583 struct amdgpu_ring *ring;
2584
2585 if (!adev)
2586 return -ENODEV;
2587 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2588 ring = &adev->gfx.gfx_ring[i];
2589 if (ring->sched.ready)
2590 mask |= 1ULL << i;
2591 }
2592
2593 *val = mask;
2594 return 0;
2595 }
2596
2597 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
2598 amdgpu_debugfs_gfx_sched_mask_get,
2599 amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
2600
2601 #endif
2602
2603 void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
2604 {
2605 #if defined(CONFIG_DEBUG_FS)
2606 struct drm_minor *minor = adev_to_drm(adev)->primary;
2607 struct dentry *root = minor->debugfs_root;
2608 char name[32];
2609
2610 if (adev->gfx.num_gfx_rings <= 1)
2611 return;
2612 sprintf(name, "amdgpu_gfx_sched_mask");
2613 debugfs_create_file(name, 0600, root, adev,
2614 &amdgpu_debugfs_gfx_sched_mask_fops);
2615 #endif
2616 }
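
/*
 * Usage sketch (hypothetical debugfs paths, assuming DRI minor 0):
 *   cat  /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask        # read the current ring mask
 *   echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask  # route gfx jobs to ring 0 only
 */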
2617
2618 /*
2619 * debugfs interface to enable/disable compute job submission to specific rings.
2620 */
2621 #if defined(CONFIG_DEBUG_FS)
2622 static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
2623 {
2624 struct amdgpu_device *adev = (struct amdgpu_device *)data;
2625 u32 i;
2626 u64 mask = 0;
2627 struct amdgpu_ring *ring;
2628
2629 if (!adev)
2630 return -ENODEV;
2631
2632 mask = (1ULL << adev->gfx.num_compute_rings) - 1;
2633 if ((val & mask) == 0)
2634 return -EINVAL;
2635
2636 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2637 ring = &adev->gfx.compute_ring[i];
2638 if (val & (1 << i))
2639 ring->sched.ready = true;
2640 else
2641 ring->sched.ready = false;
2642 }
2643
2644 /* publish the sched.ready flag updates so they take effect immediately across SMP */
2645 smp_rmb();
2646 return 0;
2647 }
2648
2649 static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
2650 {
2651 struct amdgpu_device *adev = (struct amdgpu_device *)data;
2652 u32 i;
2653 u64 mask = 0;
2654 struct amdgpu_ring *ring;
2655
2656 if (!adev)
2657 return -ENODEV;
2658 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2659 ring = &adev->gfx.compute_ring[i];
2660 if (ring->sched.ready)
2661 mask |= 1ULL << i;
2662 }
2663
2664 *val = mask;
2665 return 0;
2666 }
2667
2668 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
2669 amdgpu_debugfs_compute_sched_mask_get,
2670 amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
2671
2672 #endif
2673
2674 void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
2675 {
2676 #if defined(CONFIG_DEBUG_FS)
2677 struct drm_minor *minor = adev_to_drm(adev)->primary;
2678 struct dentry *root = minor->debugfs_root;
2679 char name[32];
2680
2681 if (adev->gfx.num_compute_rings <= 1)
2682 return;
2683 sprintf(name, "amdgpu_compute_sched_mask");
2684 debugfs_create_file(name, 0600, root, adev,
2685 &amdgpu_debugfs_compute_sched_mask_fops);
2686 #endif
2687 }
2688
2689