xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c (revision e77a8005748547fb1f10645097f13ccdd804d7e5)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <linux/pm_runtime.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_gfx.h"
31 #include "amdgpu_rlc.h"
32 #include "amdgpu_ras.h"
33 #include "amdgpu_reset.h"
34 #include "amdgpu_xcp.h"
35 #include "amdgpu_xgmi.h"
36 
37 /* delay 0.1 second to enable gfx off feature */
38 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
39 
40 #define GFX_OFF_NO_DELAY 0
41 
42 /*
43  * GPU GFX IP block helper functions.
44  */
45 
46 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
47 				int pipe, int queue)
48 {
49 	int bit = 0;
50 
51 	bit += mec * adev->gfx.mec.num_pipe_per_mec
52 		* adev->gfx.mec.num_queue_per_pipe;
53 	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
54 	bit += queue;
55 
56 	return bit;
57 }
58 
59 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
60 				 int *mec, int *pipe, int *queue)
61 {
62 	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
63 	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
64 		% adev->gfx.mec.num_pipe_per_mec;
65 	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
66 	       / adev->gfx.mec.num_pipe_per_mec;
67 
68 }
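
/*
 * Worked example (illustrative, assuming num_pipe_per_mec = 4 and
 * num_queue_per_pipe = 8): the linear index is
 *
 *   bit = mec * 4 * 8 + pipe * 8 + queue
 *
 * so (mec, pipe, queue) = (1, 2, 3) maps to bit 1*32 + 2*8 + 3 = 51, and
 * amdgpu_queue_mask_bit_to_mec_queue(adev, 51, ...) recovers
 * queue = 51 % 8 = 3, pipe = (51 / 8) % 4 = 2 and mec = (51 / 8) / 4 = 1.
 */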
69 
70 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
71 				     int xcc_id, int mec, int pipe, int queue)
72 {
73 	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
74 			adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
75 }
76 
77 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
78 			       int me, int pipe, int queue)
79 {
80 	int bit = 0;
81 
82 	bit += me * adev->gfx.me.num_pipe_per_me
83 		* adev->gfx.me.num_queue_per_pipe;
84 	bit += pipe * adev->gfx.me.num_queue_per_pipe;
85 	bit += queue;
86 
87 	return bit;
88 }
89 
90 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
91 				    int me, int pipe, int queue)
92 {
93 	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
94 			adev->gfx.me.queue_bitmap);
95 }
96 
97 /**
98  * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
99  *
100  * @mask: array in which the per-shader array disable masks will be stored
101  * @max_se: number of SEs
102  * @max_sh: number of SHs
103  *
104  * The bitmask of CUs to be disabled in the shader array determined by se and
105  * sh is stored in mask[se * max_sh + sh].
106  */
107 void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
108 {
109 	unsigned int se, sh, cu;
110 	const char *p;
111 
112 	memset(mask, 0, sizeof(*mask) * max_se * max_sh);
113 
114 	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
115 		return;
116 
117 	p = amdgpu_disable_cu;
118 	for (;;) {
119 		char *next;
120 		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
121 
122 		if (ret < 3) {
123 			DRM_ERROR("amdgpu: could not parse disable_cu\n");
124 			return;
125 		}
126 
127 		if (se < max_se && sh < max_sh && cu < 16) {
128 			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
129 			mask[se * max_sh + sh] |= 1u << cu;
130 		} else {
131 			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
132 				  se, sh, cu);
133 		}
134 
135 		next = strchr(p, ',');
136 		if (!next)
137 			break;
138 		p = next + 1;
139 	}
140 }
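
/*
 * Example (illustrative): booting with amdgpu.disable_cu=0.0.3,1.2.7 parses
 * two se.sh.cu triples, setting bit 3 in mask[0 * max_sh + 0] (CU 3 of
 * SE0/SH0) and bit 7 in mask[1 * max_sh + 2] (CU 7 of SE1/SH2). A malformed
 * entry aborts parsing; an out-of-range entry is logged and skipped.
 */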
141 
142 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
143 {
144 	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
145 }
146 
147 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
148 {
149 	if (amdgpu_compute_multipipe != -1) {
150 		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
151 			 amdgpu_compute_multipipe);
152 		return amdgpu_compute_multipipe == 1;
153 	}
154 
155 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
156 		return true;
157 
158 	/* FIXME: spreading the queues across pipes causes perf regressions
159 	 * on POLARIS11 compute workloads */
160 	if (adev->asic_type == CHIP_POLARIS11)
161 		return false;
162 
163 	return adev->gfx.mec.num_mec > 1;
164 }
165 
166 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
167 						struct amdgpu_ring *ring)
168 {
169 	int queue = ring->queue;
170 	int pipe = ring->pipe;
171 
172 	/* Policy: use pipe1 queue0 as high priority graphics queue if we
173 	 * have more than one gfx pipe.
174 	 */
175 	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
176 	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
177 		int me = ring->me;
178 		int bit;
179 
180 		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
181 		if (ring == &adev->gfx.gfx_ring[bit])
182 			return true;
183 	}
184 
185 	return false;
186 }
187 
188 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
189 					       struct amdgpu_ring *ring)
190 {
191 	/* Policy: use 1st queue as high priority compute queue if we
192 	 * have more than one compute queue.
193 	 */
194 	if (adev->gfx.num_compute_rings > 1 &&
195 	    ring == &adev->gfx.compute_ring[0])
196 		return true;
197 
198 	return false;
199 }
200 
201 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
202 {
203 	int i, j, queue, pipe;
204 	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
205 	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
206 				     adev->gfx.mec.num_queue_per_pipe,
207 				     adev->gfx.num_compute_rings);
208 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
209 
210 	if (multipipe_policy) {
211 		/* policy: spread queues evenly across all pipes on MEC1 only;
212 		 * for multiple XCCs, just reuse this policy per XCC for simplicity */
213 		for (j = 0; j < num_xcc; j++) {
214 			for (i = 0; i < max_queues_per_mec; i++) {
215 				pipe = i % adev->gfx.mec.num_pipe_per_mec;
216 				queue = (i / adev->gfx.mec.num_pipe_per_mec) %
217 					 adev->gfx.mec.num_queue_per_pipe;
218 
219 				set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
220 					adev->gfx.mec_bitmap[j].queue_bitmap);
221 			}
222 		}
223 	} else {
224 		/* policy: amdgpu owns all queues in the given pipe */
225 		for (j = 0; j < num_xcc; j++) {
226 			for (i = 0; i < max_queues_per_mec; ++i)
227 				set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
228 		}
229 	}
230 
231 	for (j = 0; j < num_xcc; j++) {
232 		dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
233 			bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
234 	}
235 }
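
/*
 * Example (illustrative): with num_pipe_per_mec = 4, num_queue_per_pipe = 8
 * and num_compute_rings = 8, the multipipe policy visits
 * (pipe, queue) = (0,0), (1,0), (2,0), (3,0), (0,1), (1,1), (2,1), (3,1),
 * i.e. sets queue bits 0, 8, 16, 24, 1, 9, 17, 25, spreading the eight
 * rings round-robin across the four pipes of MEC1. The fallback policy
 * would simply set bits 0..7, packing all eight rings onto pipe 0.
 */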
236 
237 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
238 {
239 	int i, queue, pipe;
240 	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
241 	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
242 					adev->gfx.me.num_queue_per_pipe;
243 
244 	if (multipipe_policy) {
245 		/* policy: amdgpu owns the first queue per pipe at this stage;
246 		 * this will be extended to multiple queues per pipe later */
247 		for (i = 0; i < max_queues_per_me; i++) {
248 			pipe = i % adev->gfx.me.num_pipe_per_me;
249 			queue = (i / adev->gfx.me.num_pipe_per_me) %
250 				adev->gfx.me.num_queue_per_pipe;
251 
252 			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
253 				adev->gfx.me.queue_bitmap);
254 		}
255 	} else {
256 		for (i = 0; i < max_queues_per_me; ++i)
257 			set_bit(i, adev->gfx.me.queue_bitmap);
258 	}
259 
260 	/* update the number of active graphics rings */
261 	adev->gfx.num_gfx_rings =
262 		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
263 }
264 
265 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
266 				  struct amdgpu_ring *ring, int xcc_id)
267 {
268 	int queue_bit;
269 	int mec, pipe, queue;
270 
271 	queue_bit = adev->gfx.mec.num_mec
272 		    * adev->gfx.mec.num_pipe_per_mec
273 		    * adev->gfx.mec.num_queue_per_pipe;
274 
275 	while (--queue_bit >= 0) {
276 		if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
277 			continue;
278 
279 		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
280 
281 		/*
282 		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
283 		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
284 		 * can only be issued on queue 0.
285 		 */
286 		if ((mec == 1 && pipe > 1) || queue != 0)
287 			continue;
288 
289 		ring->me = mec + 1;
290 		ring->pipe = pipe;
291 		ring->queue = queue;
292 
293 		return 0;
294 	}
295 
296 	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
297 	return -EINVAL;
298 }
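
/*
 * Example (illustrative): with 2 MECs x 4 pipes x 8 queues the scan starts
 * at bit 63 and walks down, skipping queues already claimed for KCQs in
 * mec_bitmap as well as anything violating the two rules above. The first
 * acceptable candidate is driver bit 40, i.e. (mec, pipe, queue) = (1, 1, 0),
 * stored as ring->me = 2 because ring->me is 1-based.
 */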
299 
300 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
301 {
302 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
303 	struct amdgpu_irq_src *irq = &kiq->irq;
304 	struct amdgpu_ring *ring = &kiq->ring;
305 	int r = 0;
306 
307 	spin_lock_init(&kiq->ring_lock);
308 
309 	ring->adev = NULL;
310 	ring->ring_obj = NULL;
311 	ring->use_doorbell = true;
312 	ring->xcc_id = xcc_id;
313 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
314 	ring->doorbell_index =
315 		(adev->doorbell_index.kiq +
316 		 xcc_id * adev->doorbell_index.xcc_doorbell_range)
317 		<< 1;
318 
319 	r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
320 	if (r)
321 		return r;
322 
323 	ring->eop_gpu_addr = kiq->eop_gpu_addr;
324 	ring->no_scheduler = true;
325 	snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu",
326 		 (unsigned char)xcc_id, (unsigned char)ring->me,
327 		 (unsigned char)ring->pipe, (unsigned char)ring->queue);
328 	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
329 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
330 	if (r)
331 		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
332 
333 	return r;
334 }
335 
336 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
337 {
338 	amdgpu_ring_fini(ring);
339 }
340 
341 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
342 {
343 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
344 
345 	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
346 }
347 
348 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
349 			unsigned int hpd_size, int xcc_id)
350 {
351 	int r;
352 	u32 *hpd;
353 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
354 
355 	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
356 				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
357 				    &kiq->eop_gpu_addr, (void **)&hpd);
358 	if (r) {
359 		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
360 		return r;
361 	}
362 
363 	memset(hpd, 0, hpd_size);
364 
365 	r = amdgpu_bo_reserve(kiq->eop_obj, true);
366 	if (unlikely(r != 0))
367 		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
368 	amdgpu_bo_kunmap(kiq->eop_obj);
369 	amdgpu_bo_unreserve(kiq->eop_obj);
370 
371 	return 0;
372 }
373 
374 /* create MQD for each compute/gfx queue */
375 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
376 			   unsigned int mqd_size, int xcc_id)
377 {
378 	int r, i, j;
379 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
380 	struct amdgpu_ring *ring = &kiq->ring;
381 	u32 domain = AMDGPU_GEM_DOMAIN_GTT;
382 
383 #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
384 	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
385 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
386 		domain |= AMDGPU_GEM_DOMAIN_VRAM;
387 #endif
388 
389 	/* create MQD for KIQ */
390 	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
391 		/* Originally the KIQ MQD was placed in GTT, but for SRIOV VRAM is a must;
392 		 * otherwise the hypervisor triggers a SAVE_VF failure after driver unload,
393 		 * since the MQD has been deallocated and the GART unbound. To avoid diverging
394 		 * code paths, use the VRAM domain for the KIQ MQD on both SRIOV and bare-metal.
395 		 */
396 		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
397 					    AMDGPU_GEM_DOMAIN_VRAM |
398 					    AMDGPU_GEM_DOMAIN_GTT,
399 					    &ring->mqd_obj,
400 					    &ring->mqd_gpu_addr,
401 					    &ring->mqd_ptr);
402 		if (r) {
403 			dev_warn(adev->dev, "failed to create ring mqd bo (%d)\n", r);
404 			return r;
405 		}
406 
407 		/* prepare MQD backup */
408 		kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
409 		if (!kiq->mqd_backup) {
410 			dev_warn(adev->dev,
411 				 "no memory to create MQD backup for ring %s\n", ring->name);
412 			return -ENOMEM;
413 		}
414 	}
415 
416 	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
417 		/* create MQD for each KGQ */
418 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
419 			ring = &adev->gfx.gfx_ring[i];
420 			if (!ring->mqd_obj) {
421 				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
422 							    domain, &ring->mqd_obj,
423 							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
424 				if (r) {
425 					dev_warn(adev->dev, "failed to create ring mqd bo (%d)\n", r);
426 					return r;
427 				}
428 
429 				ring->mqd_size = mqd_size;
430 				/* prepare MQD backup */
431 				adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
432 				if (!adev->gfx.me.mqd_backup[i]) {
433 					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
434 					return -ENOMEM;
435 				}
436 			}
437 		}
438 	}
439 
440 	/* create MQD for each KCQ */
441 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
442 		j = i + xcc_id * adev->gfx.num_compute_rings;
443 		ring = &adev->gfx.compute_ring[j];
444 		if (!ring->mqd_obj) {
445 			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
446 						    domain, &ring->mqd_obj,
447 						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
448 			if (r) {
449 				dev_warn(adev->dev, "failed to create ring mqd bo (%d)\n", r);
450 				return r;
451 			}
452 
453 			ring->mqd_size = mqd_size;
454 			/* prepare MQD backup */
455 			adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL);
456 			if (!adev->gfx.mec.mqd_backup[j]) {
457 				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
458 				return -ENOMEM;
459 			}
460 		}
461 	}
462 
463 	return 0;
464 }
465 
466 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
467 {
468 	struct amdgpu_ring *ring = NULL;
469 	int i, j;
470 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
471 
472 	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
473 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
474 			ring = &adev->gfx.gfx_ring[i];
475 			kfree(adev->gfx.me.mqd_backup[i]);
476 			amdgpu_bo_free_kernel(&ring->mqd_obj,
477 					      &ring->mqd_gpu_addr,
478 					      &ring->mqd_ptr);
479 		}
480 	}
481 
482 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
483 		j = i + xcc_id * adev->gfx.num_compute_rings;
484 		ring = &adev->gfx.compute_ring[j];
485 		kfree(adev->gfx.mec.mqd_backup[j]);
486 		amdgpu_bo_free_kernel(&ring->mqd_obj,
487 				      &ring->mqd_gpu_addr,
488 				      &ring->mqd_ptr);
489 	}
490 
491 	ring = &kiq->ring;
492 	kfree(kiq->mqd_backup);
493 	amdgpu_bo_free_kernel(&ring->mqd_obj,
494 			      &ring->mqd_gpu_addr,
495 			      &ring->mqd_ptr);
496 }
497 
498 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
499 {
500 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
501 	struct amdgpu_ring *kiq_ring = &kiq->ring;
502 	int i, r = 0;
503 	int j;
504 
505 	if (adev->enable_mes) {
506 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
507 			j = i + xcc_id * adev->gfx.num_compute_rings;
508 			amdgpu_mes_unmap_legacy_queue(adev,
509 						   &adev->gfx.compute_ring[j],
510 						   RESET_QUEUES, 0, 0);
511 		}
512 		return 0;
513 	}
514 
515 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
516 		return -EINVAL;
517 
518 	if (!kiq_ring->sched.ready || adev->job_hang || amdgpu_in_reset(adev))
519 		return 0;
520 
521 	spin_lock(&kiq->ring_lock);
522 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
523 					adev->gfx.num_compute_rings)) {
524 		spin_unlock(&kiq->ring_lock);
525 		return -ENOMEM;
526 	}
527 
528 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
529 		j = i + xcc_id * adev->gfx.num_compute_rings;
530 		kiq->pmf->kiq_unmap_queues(kiq_ring,
531 					   &adev->gfx.compute_ring[j],
532 					   RESET_QUEUES, 0, 0);
533 	}
534 	/* Submit unmap queue packet */
535 	amdgpu_ring_commit(kiq_ring);
536 	/*
537 	 * The ring test does a basic scratch register change check. Run it
538 	 * here to ensure that the unmap-queues packet submitted above has
539 	 * been processed successfully before returning.
540 	 */
541 	r = amdgpu_ring_test_helper(kiq_ring);
542 
543 	spin_unlock(&kiq->ring_lock);
544 
545 	return r;
546 }
547 
548 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
549 {
550 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
551 	struct amdgpu_ring *kiq_ring = &kiq->ring;
552 	int i, r = 0;
553 	int j;
554 
555 	if (adev->enable_mes) {
556 		if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
557 			for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
558 				j = i + xcc_id * adev->gfx.num_gfx_rings;
559 				amdgpu_mes_unmap_legacy_queue(adev,
560 						      &adev->gfx.gfx_ring[j],
561 						      PREEMPT_QUEUES, 0, 0);
562 			}
563 		}
564 		return 0;
565 	}
566 
567 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
568 		return -EINVAL;
569 
570 	if (!adev->gfx.kiq[0].ring.sched.ready || adev->job_hang)
571 		return 0;
572 
573 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
574 		spin_lock(&kiq->ring_lock);
575 		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
576 						adev->gfx.num_gfx_rings)) {
577 			spin_unlock(&kiq->ring_lock);
578 			return -ENOMEM;
579 		}
580 
581 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
582 			j = i + xcc_id * adev->gfx.num_gfx_rings;
583 			kiq->pmf->kiq_unmap_queues(kiq_ring,
584 						   &adev->gfx.gfx_ring[j],
585 						   PREEMPT_QUEUES, 0, 0);
586 		}
587 		/* Submit unmap queue packet */
588 		amdgpu_ring_commit(kiq_ring);
589 
590 		/*
591 		 * The ring test does a basic scratch register change check.
592 		 * Run it here to ensure that the unmap-queues packet submitted
593 		 * above has been processed successfully before returning.
594 		 */
595 		r = amdgpu_ring_test_helper(kiq_ring);
596 		spin_unlock(&kiq->ring_lock);
597 	}
598 
599 	return r;
600 }
601 
602 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
603 					int queue_bit)
604 {
605 	int mec, pipe, queue;
606 	int set_resource_bit = 0;
607 
608 	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
609 
610 	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
611 
612 	return set_resource_bit;
613 }
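
/*
 * Illustrative note: the SET_RESOURCES queue mask always assumes the maximal
 * 4 pipes x 8 queues layout per MEC, so a driver-side queue bit must be
 * re-encoded. E.g. with num_pipe_per_mec = 2 and num_queue_per_pipe = 4,
 * driver bit 9 decodes to (mec, pipe, queue) = (1, 0, 1) and re-encodes as
 * 1 * 32 + 0 * 8 + 1 = 33 in the mask.
 */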
614 
615 static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
616 {
617 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
618 	struct amdgpu_ring *kiq_ring = &kiq->ring;
619 	uint64_t queue_mask = ~0ULL;
620 	int r, i, j;
621 
622 	amdgpu_device_flush_hdp(adev, NULL);
623 
624 	if (!adev->enable_uni_mes) {
625 		spin_lock(&kiq->ring_lock);
626 		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
627 		if (r) {
628 			dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
629 			spin_unlock(&kiq->ring_lock);
630 			return r;
631 		}
632 
633 		kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
634 		r = amdgpu_ring_test_helper(kiq_ring);
635 		spin_unlock(&kiq->ring_lock);
636 		if (r)
637 			dev_err(adev->dev, "KIQ failed to set resources\n");
638 	}
639 
640 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
641 		j = i + xcc_id * adev->gfx.num_compute_rings;
642 		r = amdgpu_mes_map_legacy_queue(adev,
643 						&adev->gfx.compute_ring[j]);
644 		if (r) {
645 			dev_err(adev->dev, "failed to map compute queue\n");
646 			return r;
647 		}
648 	}
649 
650 	return 0;
651 }
652 
653 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
654 {
655 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
656 	struct amdgpu_ring *kiq_ring = &kiq->ring;
657 	uint64_t queue_mask = 0;
658 	int r, i, j;
659 
660 	if (adev->mes.enable_legacy_queue_map)
661 		return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
662 
663 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
664 		return -EINVAL;
665 
666 	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
667 		if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
668 			continue;
669 
670 		/* This situation may be hit in the future if a new HW
671 		 * generation exposes more than 64 queues. If so, the
672 		 * definition of queue_mask needs updating. */
673 		if (WARN_ON(i >= (sizeof(queue_mask) * 8))) {
674 			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
675 			break;
676 		}
677 
678 		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
679 	}
680 
681 	amdgpu_device_flush_hdp(adev, NULL);
682 
683 	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
684 		 kiq_ring->queue);
685 
686 	spin_lock(&kiq->ring_lock);
687 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
688 					adev->gfx.num_compute_rings +
689 					kiq->pmf->set_resources_size);
690 	if (r) {
691 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
692 		spin_unlock(&kiq->ring_lock);
693 		return r;
694 	}
695 
696 	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
697 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
698 		j = i + xcc_id * adev->gfx.num_compute_rings;
699 		kiq->pmf->kiq_map_queues(kiq_ring,
700 					 &adev->gfx.compute_ring[j]);
701 	}
702 	/* Submit map queue packet */
703 	amdgpu_ring_commit(kiq_ring);
704 	/*
705 	 * The ring test does a basic scratch register change check. Run it
706 	 * here to ensure that the map-queues packet submitted above has
707 	 * been processed successfully before returning.
708 	 */
709 	r = amdgpu_ring_test_helper(kiq_ring);
710 	spin_unlock(&kiq->ring_lock);
711 	if (r)
712 		DRM_ERROR("KCQ enable failed\n");
713 
714 	return r;
715 }
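
/*
 * Minimal usage sketch (illustrative, not a real call site): a GFX IP
 * hw_init path typically maps the compute queues on every XCC once the
 * KIQ ring is ready:
 *
 *   int i, r;
 *
 *   for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); i++) {
 *           r = amdgpu_gfx_enable_kcq(adev, i);
 *           if (r)
 *                   return r;
 *   }
 */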
716 
717 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
718 {
719 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
720 	struct amdgpu_ring *kiq_ring = &kiq->ring;
721 	int r, i, j;
722 
723 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
724 		return -EINVAL;
725 
726 	amdgpu_device_flush_hdp(adev, NULL);
727 
728 	if (adev->mes.enable_legacy_queue_map) {
729 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
730 			j = i + xcc_id * adev->gfx.num_gfx_rings;
731 			r = amdgpu_mes_map_legacy_queue(adev,
732 							&adev->gfx.gfx_ring[j]);
733 			if (r) {
734 				DRM_ERROR("failed to map gfx queue\n");
735 				return r;
736 			}
737 		}
738 
739 		return 0;
740 	}
741 
742 	spin_lock(&kiq->ring_lock);
743 	/* No need to map kgq on the slave */
744 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
745 		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
746 						adev->gfx.num_gfx_rings);
747 		if (r) {
748 			DRM_ERROR("Failed to lock KIQ (%d).\n", r);
749 			spin_unlock(&kiq->ring_lock);
750 			return r;
751 		}
752 
753 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
754 			j = i + xcc_id * adev->gfx.num_gfx_rings;
755 			kiq->pmf->kiq_map_queues(kiq_ring,
756 						 &adev->gfx.gfx_ring[j]);
757 		}
758 	}
759 	/* Submit map queue packet */
760 	amdgpu_ring_commit(kiq_ring);
761 	/*
762 	 * The ring test does a basic scratch register change check. Run it
763 	 * here to ensure that the map-queues packet submitted above has
764 	 * been processed successfully before returning.
765 	 */
766 	r = amdgpu_ring_test_helper(kiq_ring);
767 	spin_unlock(&kiq->ring_lock);
768 	if (r)
769 		DRM_ERROR("KGQ enable failed\n");
770 
771 	return r;
772 }
773 
774 /**
775  * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
776  *
777  * @adev: amdgpu_device pointer
778  * @enable: true to enable the gfx off feature, false to disable it
779  *
780  * 1. The gfx off feature is enabled by the gfx IP after gfx CG/PG is enabled.
781  * 2. Other clients can send requests to disable the gfx off feature; such requests must be honored.
782  * 3. Other clients can cancel their request to disable the gfx off feature.
783  * 4. Other clients should not request enabling the gfx off feature without a prior disable request.
784  */
785 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
786 {
787 	unsigned long delay = GFX_OFF_DELAY_ENABLE;
788 
789 	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
790 		return;
791 
792 	mutex_lock(&adev->gfx.gfx_off_mutex);
793 
794 	if (enable) {
795 		/* If the count is already 0, it means there's an imbalance bug somewhere.
796 		 * Note that the bug may be in a different caller than the one which triggers the
797 		 * WARN_ON_ONCE.
798 		 */
799 		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
800 			goto unlock;
801 
802 		adev->gfx.gfx_off_req_count--;
803 
804 		if (adev->gfx.gfx_off_req_count == 0 &&
805 		    !adev->gfx.gfx_off_state) {
806 			/* If going to s2idle, no need to wait */
807 			if (adev->in_s0ix) {
808 				if (!amdgpu_dpm_set_powergating_by_smu(adev,
809 						AMD_IP_BLOCK_TYPE_GFX, true))
810 					adev->gfx.gfx_off_state = true;
811 			} else {
812 				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
813 					      delay);
814 			}
815 		}
816 	} else {
817 		if (adev->gfx.gfx_off_req_count == 0) {
818 			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
819 
820 			if (adev->gfx.gfx_off_state &&
821 			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
822 				adev->gfx.gfx_off_state = false;
823 
824 				if (adev->gfx.funcs->init_spm_golden) {
825 					dev_dbg(adev->dev,
826 						"GFXOFF is disabled, re-init SPM golden settings\n");
827 					amdgpu_gfx_init_spm_golden(adev);
828 				}
829 			}
830 		}
831 
832 		adev->gfx.gfx_off_req_count++;
833 	}
834 
835 unlock:
836 	mutex_unlock(&adev->gfx.gfx_off_mutex);
837 }
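
/*
 * Usage sketch (illustrative): callers that must touch GFX registers while
 * GFXOFF may be active bracket the access with a disable/enable pair:
 *
 *   amdgpu_gfx_off_ctrl(adev, false);
 *   ... safe MMIO access here ...
 *   amdgpu_gfx_off_ctrl(adev, true);
 *
 * Requests are reference counted via gfx_off_req_count, so nested
 * disable/enable pairs from different callers compose correctly.
 */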
838 
839 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
840 {
841 	int r = 0;
842 
843 	mutex_lock(&adev->gfx.gfx_off_mutex);
844 
845 	r = amdgpu_dpm_set_residency_gfxoff(adev, value);
846 
847 	mutex_unlock(&adev->gfx.gfx_off_mutex);
848 
849 	return r;
850 }
851 
852 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
853 {
854 	int r = 0;
855 
856 	mutex_lock(&adev->gfx.gfx_off_mutex);
857 
858 	r = amdgpu_dpm_get_residency_gfxoff(adev, value);
859 
860 	mutex_unlock(&adev->gfx.gfx_off_mutex);
861 
862 	return r;
863 }
864 
865 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
866 {
867 	int r = 0;
868 
869 	mutex_lock(&adev->gfx.gfx_off_mutex);
870 
871 	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
872 
873 	mutex_unlock(&adev->gfx.gfx_off_mutex);
874 
875 	return r;
876 }
877 
878 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
879 {
881 	int r = 0;
882 
883 	mutex_lock(&adev->gfx.gfx_off_mutex);
884 
885 	r = amdgpu_dpm_get_status_gfxoff(adev, value);
886 
887 	mutex_unlock(&adev->gfx.gfx_off_mutex);
888 
889 	return r;
890 }
891 
892 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
893 {
894 	int r;
895 
896 	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
897 		if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
898 			r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
899 			if (r)
900 				return r;
901 		}
902 
903 		r = amdgpu_ras_block_late_init(adev, ras_block);
904 		if (r)
905 			return r;
906 
907 		if (adev->gfx.cp_ecc_error_irq.funcs) {
908 			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
909 			if (r)
910 				goto late_fini;
911 		}
912 	} else {
913 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
914 	}
915 
916 	return 0;
917 late_fini:
918 	amdgpu_ras_block_late_fini(adev, ras_block);
919 	return r;
920 }
921 
922 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
923 {
924 	int err = 0;
925 	struct amdgpu_gfx_ras *ras = NULL;
926 
927 	/* If adev->gfx.ras is NULL, the gfx block does not
928 	 * support RAS, so there is nothing to do here.
929 	 */
930 	if (!adev->gfx.ras)
931 		return 0;
932 
933 	ras = adev->gfx.ras;
934 
935 	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
936 	if (err) {
937 		dev_err(adev->dev, "Failed to register gfx ras block!\n");
938 		return err;
939 	}
940 
941 	strcpy(ras->ras_block.ras_comm.name, "gfx");
942 	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
943 	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
944 	adev->gfx.ras_if = &ras->ras_block.ras_comm;
945 
946 	/* If no IP-specific ras_late_init function is defined, use the gfx default */
947 	if (!ras->ras_block.ras_late_init)
948 		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
949 
950 	/* If no IP-specific ras_cb function is defined, use the default ras_cb */
951 	if (!ras->ras_block.ras_cb)
952 		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
953 
954 	return 0;
955 }
956 
957 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
958 						struct amdgpu_iv_entry *entry)
959 {
960 	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
961 		return adev->gfx.ras->poison_consumption_handler(adev, entry);
962 
963 	return 0;
964 }
965 
966 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
967 		void *err_data,
968 		struct amdgpu_iv_entry *entry)
969 {
970 	/* TODO: a UE will trigger an interrupt.
971 	 *
972 	 * When "Full RAS" is enabled, the per-IP interrupt sources should
973 	 * be disabled and the driver should only look for the aggregated
974 	 * interrupt via sync flood.
975 	 */
976 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
977 		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
978 		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
979 		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
980 			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
981 		amdgpu_ras_reset_gpu(adev);
982 	}
983 	return AMDGPU_RAS_SUCCESS;
984 }
985 
986 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
987 				  struct amdgpu_irq_src *source,
988 				  struct amdgpu_iv_entry *entry)
989 {
990 	struct ras_common_if *ras_if = adev->gfx.ras_if;
991 	struct ras_dispatch_if ih_data = {
992 		.entry = entry,
993 	};
994 
995 	if (!ras_if)
996 		return 0;
997 
998 	ih_data.head = *ras_if;
999 
1000 	DRM_ERROR("CP ECC ERROR IRQ\n");
1001 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1002 	return 0;
1003 }
1004 
1005 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
1006 		void *ras_error_status,
1007 		void (*func)(struct amdgpu_device *adev, void *ras_error_status,
1008 				int xcc_id))
1009 {
1010 	int i;
1011 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
1012 	uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
1013 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
1014 
1015 	if (err_data) {
1016 		err_data->ue_count = 0;
1017 		err_data->ce_count = 0;
1018 	}
1019 
1020 	for_each_inst(i, xcc_mask)
1021 		func(adev, ras_error_status, i);
1022 }
1023 
1024 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
1025 {
1026 	signed long r, cnt = 0;
1027 	unsigned long flags;
1028 	uint32_t seq, reg_val_offs = 0, value = 0;
1029 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1030 	struct amdgpu_ring *ring = &kiq->ring;
1031 
1032 	if (amdgpu_device_skip_hw_access(adev))
1033 		return 0;
1034 
1035 	if (adev->mes.ring[0].sched.ready)
1036 		return amdgpu_mes_rreg(adev, reg);
1037 
1038 	BUG_ON(!ring->funcs->emit_rreg);
1039 
1040 	spin_lock_irqsave(&kiq->ring_lock, flags);
1041 	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
1042 		pr_err("critical bug! too many kiq readers\n");
1043 		goto failed_unlock;
1044 	}
1045 	r = amdgpu_ring_alloc(ring, 32);
1046 	if (r)
1047 		goto failed_unlock;
1048 
1049 	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
1050 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1051 	if (r)
1052 		goto failed_undo;
1053 
1054 	amdgpu_ring_commit(ring);
1055 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1056 
1057 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1058 
1059 	/* Don't keep waiting in the GPU reset case, because doing so may
1060 	 * block the gpu_recover() routine forever; e.g. if this KIQ read
1061 	 * is triggered from TTM, ttm_bo_lock_delayed_workqueue() will
1062 	 * never return while we keep waiting here, which causes
1063 	 * gpu_recover() to hang.
1064 	 *
1065 	 * Also don't keep waiting when called from IRQ context.
1066 	 */
1067 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1068 		goto failed_kiq_read;
1069 
1070 	might_sleep();
1071 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1072 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1073 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1074 	}
1075 
1076 	if (cnt > MAX_KIQ_REG_TRY)
1077 		goto failed_kiq_read;
1078 
1079 	mb();
1080 	value = adev->wb.wb[reg_val_offs];
1081 	amdgpu_device_wb_free(adev, reg_val_offs);
1082 	return value;
1083 
1084 failed_undo:
1085 	amdgpu_ring_undo(ring);
1086 failed_unlock:
1087 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1088 failed_kiq_read:
1089 	if (reg_val_offs)
1090 		amdgpu_device_wb_free(adev, reg_val_offs);
1091 	dev_err(adev->dev, "failed to read reg:%x\n", reg);
1092 	return ~0;
1093 }
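
/*
 * Illustrative timing note: after the initial MAX_KIQ_REG_WAIT poll, the
 * read above retries up to MAX_KIQ_REG_TRY times, sleeping
 * MAX_KIQ_REG_BAILOUT_INTERVAL between attempts, so the worst-case wait is
 * roughly MAX_KIQ_REG_WAIT + MAX_KIQ_REG_TRY * (MAX_KIQ_REG_BAILOUT_INTERVAL
 * + MAX_KIQ_REG_WAIT) before ~0 is returned as the failure value.
 */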
1094 
1095 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
1096 {
1097 	signed long r, cnt = 0;
1098 	unsigned long flags;
1099 	uint32_t seq;
1100 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1101 	struct amdgpu_ring *ring = &kiq->ring;
1102 
1103 	BUG_ON(!ring->funcs->emit_wreg);
1104 
1105 	if (amdgpu_device_skip_hw_access(adev))
1106 		return;
1107 
1108 	if (adev->mes.ring[0].sched.ready) {
1109 		amdgpu_mes_wreg(adev, reg, v);
1110 		return;
1111 	}
1112 
1113 	spin_lock_irqsave(&kiq->ring_lock, flags);
1114 	r = amdgpu_ring_alloc(ring, 32);
1115 	if (r)
1116 		goto failed_unlock;
1117 
1118 	amdgpu_ring_emit_wreg(ring, reg, v);
1119 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1120 	if (r)
1121 		goto failed_undo;
1122 
1123 	amdgpu_ring_commit(ring);
1124 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1125 
1126 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1127 
1128 	/* Don't keep waiting in the GPU reset case, because doing so may
1129 	 * block the gpu_recover() routine forever; e.g. if this KIQ write
1130 	 * is triggered from TTM, ttm_bo_lock_delayed_workqueue() will
1131 	 * never return while we keep waiting here, which causes
1132 	 * gpu_recover() to hang.
1133 	 *
1134 	 * Also don't keep waiting when called from IRQ context.
1135 	 */
1136 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1137 		goto failed_kiq_write;
1138 
1139 	might_sleep();
1140 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1142 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1143 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1144 	}
1145 
1146 	if (cnt > MAX_KIQ_REG_TRY)
1147 		goto failed_kiq_write;
1148 
1149 	return;
1150 
1151 failed_undo:
1152 	amdgpu_ring_undo(ring);
1153 failed_unlock:
1154 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1155 failed_kiq_write:
1156 	dev_err(adev->dev, "failed to write reg:%x\n", reg);
1157 }
1158 
1159 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
1160 {
1161 	if (amdgpu_num_kcq == -1) {
1162 		return 8;
1163 	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1164 		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1165 		return 8;
1166 	}
1167 	return amdgpu_num_kcq;
1168 }
1169 
1170 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
1171 				  uint32_t ucode_id)
1172 {
1173 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1174 	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
1175 	struct amdgpu_firmware_info *info = NULL;
1176 	const struct firmware *ucode_fw;
1177 	unsigned int fw_size;
1178 
1179 	switch (ucode_id) {
1180 	case AMDGPU_UCODE_ID_CP_PFP:
1181 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1182 			adev->gfx.pfp_fw->data;
1183 		adev->gfx.pfp_fw_version =
1184 			le32_to_cpu(cp_hdr->header.ucode_version);
1185 		adev->gfx.pfp_feature_version =
1186 			le32_to_cpu(cp_hdr->ucode_feature_version);
1187 		ucode_fw = adev->gfx.pfp_fw;
1188 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1189 		break;
1190 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
1191 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1192 			adev->gfx.pfp_fw->data;
1193 		adev->gfx.pfp_fw_version =
1194 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1195 		adev->gfx.pfp_feature_version =
1196 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1197 		ucode_fw = adev->gfx.pfp_fw;
1198 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1199 		break;
1200 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
1201 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1202 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1203 			adev->gfx.pfp_fw->data;
1204 		ucode_fw = adev->gfx.pfp_fw;
1205 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1206 		break;
1207 	case AMDGPU_UCODE_ID_CP_ME:
1208 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1209 			adev->gfx.me_fw->data;
1210 		adev->gfx.me_fw_version =
1211 			le32_to_cpu(cp_hdr->header.ucode_version);
1212 		adev->gfx.me_feature_version =
1213 			le32_to_cpu(cp_hdr->ucode_feature_version);
1214 		ucode_fw = adev->gfx.me_fw;
1215 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1216 		break;
1217 	case AMDGPU_UCODE_ID_CP_RS64_ME:
1218 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1219 			adev->gfx.me_fw->data;
1220 		adev->gfx.me_fw_version =
1221 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1222 		adev->gfx.me_feature_version =
1223 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1224 		ucode_fw = adev->gfx.me_fw;
1225 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1226 		break;
1227 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1228 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1229 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1230 			adev->gfx.me_fw->data;
1231 		ucode_fw = adev->gfx.me_fw;
1232 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1233 		break;
1234 	case AMDGPU_UCODE_ID_CP_CE:
1235 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1236 			adev->gfx.ce_fw->data;
1237 		adev->gfx.ce_fw_version =
1238 			le32_to_cpu(cp_hdr->header.ucode_version);
1239 		adev->gfx.ce_feature_version =
1240 			le32_to_cpu(cp_hdr->ucode_feature_version);
1241 		ucode_fw = adev->gfx.ce_fw;
1242 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1243 		break;
1244 	case AMDGPU_UCODE_ID_CP_MEC1:
1245 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1246 			adev->gfx.mec_fw->data;
1247 		adev->gfx.mec_fw_version =
1248 			le32_to_cpu(cp_hdr->header.ucode_version);
1249 		adev->gfx.mec_feature_version =
1250 			le32_to_cpu(cp_hdr->ucode_feature_version);
1251 		ucode_fw = adev->gfx.mec_fw;
1252 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1253 			  le32_to_cpu(cp_hdr->jt_size) * 4;
1254 		break;
1255 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
1256 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1257 			adev->gfx.mec_fw->data;
1258 		ucode_fw = adev->gfx.mec_fw;
1259 		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1260 		break;
1261 	case AMDGPU_UCODE_ID_CP_MEC2:
1262 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1263 			adev->gfx.mec2_fw->data;
1264 		adev->gfx.mec2_fw_version =
1265 			le32_to_cpu(cp_hdr->header.ucode_version);
1266 		adev->gfx.mec2_feature_version =
1267 			le32_to_cpu(cp_hdr->ucode_feature_version);
1268 		ucode_fw = adev->gfx.mec2_fw;
1269 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1270 			  le32_to_cpu(cp_hdr->jt_size) * 4;
1271 		break;
1272 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
1273 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1274 			adev->gfx.mec2_fw->data;
1275 		ucode_fw = adev->gfx.mec2_fw;
1276 		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1277 		break;
1278 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
1279 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1280 			adev->gfx.mec_fw->data;
1281 		adev->gfx.mec_fw_version =
1282 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1283 		adev->gfx.mec_feature_version =
1284 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1285 		ucode_fw = adev->gfx.mec_fw;
1286 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1287 		break;
1288 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1289 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1290 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1291 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1292 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1293 			adev->gfx.mec_fw->data;
1294 		ucode_fw = adev->gfx.mec_fw;
1295 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1296 		break;
1297 	default:
1298 		dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
1299 		return;
1300 	}
1301 
1302 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1303 		info = &adev->firmware.ucode[ucode_id];
1304 		info->ucode_id = ucode_id;
1305 		info->fw = ucode_fw;
1306 		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
1307 	}
1308 }
1309 
1310 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
1311 {
1312 	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
1313 			adev->gfx.num_xcc_per_xcp : 1));
1314 }
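
/*
 * Example (illustrative): with num_xcc_per_xcp = 2, XCCs 0, 2, 4, ... are
 * the masters of their partitions (xcc_id % 2 == 0) while XCCs 1, 3, 5, ...
 * are slaves; with num_xcc_per_xcp of 0 or 1 every XCC is a master.
 */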
1315 
1316 static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
1317 						struct device_attribute *addr,
1318 						char *buf)
1319 {
1320 	struct drm_device *ddev = dev_get_drvdata(dev);
1321 	struct amdgpu_device *adev = drm_to_adev(ddev);
1322 	int mode;
1323 
1324 	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1325 					       AMDGPU_XCP_FL_NONE);
1326 
1327 	return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
1328 }
1329 
1330 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
1331 						struct device_attribute *addr,
1332 						const char *buf, size_t count)
1333 {
1334 	struct drm_device *ddev = dev_get_drvdata(dev);
1335 	struct amdgpu_device *adev = drm_to_adev(ddev);
1336 	enum amdgpu_gfx_partition mode;
1337 	int ret = 0, num_xcc;
1338 
1339 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1340 	if (num_xcc % 2 != 0)
1341 		return -EINVAL;
1342 
1343 	if (!strncasecmp("SPX", buf, strlen("SPX"))) {
1344 		mode = AMDGPU_SPX_PARTITION_MODE;
1345 	} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
1346 		/*
1347 		 * DPX mode needs the number of AIDs to be a multiple of 2.
1348 		 * Each AID connects 2 XCCs.
1349 		 */
1350 		if (num_xcc % 4)
1351 			return -EINVAL;
1352 		mode = AMDGPU_DPX_PARTITION_MODE;
1353 	} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
1354 		if (num_xcc != 6)
1355 			return -EINVAL;
1356 		mode = AMDGPU_TPX_PARTITION_MODE;
1357 	} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
1358 		if (num_xcc != 8)
1359 			return -EINVAL;
1360 		mode = AMDGPU_QPX_PARTITION_MODE;
1361 	} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
1362 		mode = AMDGPU_CPX_PARTITION_MODE;
1363 	} else {
1364 		return -EINVAL;
1365 	}
1366 
1367 	ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
1368 
1369 	if (ret)
1370 		return ret;
1371 
1372 	return count;
1373 }
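
/*
 * Sysfs usage sketch (illustrative; "card0" is just an example node): on an
 * 8-XCC part,
 *
 *   echo DPX > /sys/class/drm/card0/device/current_compute_partition
 *
 * succeeds (8 % 4 == 0), while writing TPX fails with -EINVAL because TPX
 * requires exactly 6 XCCs. Matching is case-insensitive.
 */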
1374 
1375 static const char *xcp_desc[] = {
1376 	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
1377 	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
1378 	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
1379 	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
1380 	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
1381 };
1382 
1383 static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
1384 						struct device_attribute *addr,
1385 						char *buf)
1386 {
1387 	struct drm_device *ddev = dev_get_drvdata(dev);
1388 	struct amdgpu_device *adev = drm_to_adev(ddev);
1389 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1390 	int size = 0, mode;
1391 	char *sep = "";
1392 
1393 	if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
1394 		return sysfs_emit(buf, "Not supported\n");
1395 
1396 	for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
1397 		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
1398 		sep = ", ";
1399 	}
1400 
1401 	size += sysfs_emit_at(buf, size, "\n");
1402 
1403 	return size;
1404 }
1405 
1406 static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
1407 {
1408 	struct amdgpu_device *adev = ring->adev;
1409 	struct drm_gpu_scheduler *sched = &ring->sched;
1410 	struct drm_sched_entity entity;
1411 	struct dma_fence *f;
1412 	struct amdgpu_job *job;
1413 	struct amdgpu_ib *ib;
1414 	int i, r;
1415 
1416 	/* Initialize the scheduler entity */
1417 	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
1418 				  &sched, 1, NULL);
1419 	if (r) {
1420 		dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
1421 		goto err;
1422 	}
1423 
1424 	r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
1425 				     64, 0,
1426 				     &job);
1427 	if (r)
1428 		goto err;
1429 
1430 	job->enforce_isolation = true;
1431 
1432 	ib = &job->ibs[0];
1433 	for (i = 0; i <= ring->funcs->align_mask; ++i)
1434 		ib->ptr[i] = ring->funcs->nop;
1435 	ib->length_dw = ring->funcs->align_mask + 1;
1436 
1437 	f = amdgpu_job_submit(job);
1438 
1439 	r = dma_fence_wait(f, false);
1440 	if (r)
1441 		goto err;
1442 
1443 	dma_fence_put(f);
1444 
1445 	/* Clean up the scheduler entity */
1446 	drm_sched_entity_destroy(&entity);
1447 	return 0;
1448 
1449 err:
1450 	return r;
1451 }
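
/*
 * Note (illustrative): the job built above is just ring-aligned NOPs; the
 * cleaner shader itself is expected to be emitted by the ring backend when
 * it schedules an IB with job->enforce_isolation set, so submitting and
 * waiting on this dummy job is what forces the cleaner shader to run.
 */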
1452 
1453 static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
1454 {
1455 	int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1456 	struct amdgpu_ring *ring;
1457 	int num_xcc_to_clear;
1458 	int i, r, xcc_id;
1459 
1460 	if (adev->gfx.num_xcc_per_xcp)
1461 		num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
1462 	else
1463 		num_xcc_to_clear = 1;
1464 
1465 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1466 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1467 			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1468 			if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
1469 				r = amdgpu_gfx_run_cleaner_shader_job(ring);
1470 				if (r)
1471 					return r;
1472 				num_xcc_to_clear--;
1473 				break;
1474 			}
1475 		}
1476 	}
1477 
1478 	if (num_xcc_to_clear)
1479 		return -ENOENT;
1480 
1481 	return 0;
1482 }
1483 
1484 static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
1485 						 struct device_attribute *attr,
1486 						 const char *buf,
1487 						 size_t count)
1488 {
1489 	struct drm_device *ddev = dev_get_drvdata(dev);
1490 	struct amdgpu_device *adev = drm_to_adev(ddev);
1491 	int ret;
1492 	long value;
1493 
1494 	if (amdgpu_in_reset(adev))
1495 		return -EPERM;
1496 	if (adev->in_suspend && !adev->in_runpm)
1497 		return -EPERM;
1498 
1499 	ret = kstrtol(buf, 0, &value);
1500 
1501 	if (ret)
1502 		return -EINVAL;
1503 
1504 	if (value < 0)
1505 		return -EINVAL;
1506 
1507 	if (adev->xcp_mgr) {
1508 		if (value >= adev->xcp_mgr->num_xcps)
1509 			return -EINVAL;
1510 	} else {
1511 		if (value > 1)
1512 			return -EINVAL;
1513 	}
1514 
1515 	ret = pm_runtime_get_sync(ddev->dev);
1516 	if (ret < 0) {
1517 		pm_runtime_put_autosuspend(ddev->dev);
1518 		return ret;
1519 	}
1520 
1521 	ret = amdgpu_gfx_run_cleaner_shader(adev, value);
1522 
1523 	pm_runtime_mark_last_busy(ddev->dev);
1524 	pm_runtime_put_autosuspend(ddev->dev);
1525 
1526 	if (ret)
1527 		return ret;
1528 
1529 	return count;
1530 }
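
/*
 * Sysfs usage sketch (illustrative; "card0" is just an example node):
 * writing a partition index runs the cleaner shader on that partition's
 * compute rings, e.g.
 *
 *   echo 0 > /sys/class/drm/card0/device/run_cleaner_shader
 *
 * The index must be below num_xcps on partitioned devices.
 */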
1531 
1532 static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
1533 						struct device_attribute *attr,
1534 						char *buf)
1535 {
1536 	struct drm_device *ddev = dev_get_drvdata(dev);
1537 	struct amdgpu_device *adev = drm_to_adev(ddev);
1538 	int i;
1539 	ssize_t size = 0;
1540 
1541 	if (adev->xcp_mgr) {
1542 		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
1543 			size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
1544 			if (i < (adev->xcp_mgr->num_xcps - 1))
1545 				size += sysfs_emit_at(buf, size, " ");
1546 		}
1547 		buf[size++] = '\n';
1548 	} else {
1549 		size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
1550 	}
1551 
1552 	return size;
1553 }
1554 
1555 static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
1556 						struct device_attribute *attr,
1557 						const char *buf, size_t count)
1558 {
1559 	struct drm_device *ddev = dev_get_drvdata(dev);
1560 	struct amdgpu_device *adev = drm_to_adev(ddev);
1561 	long partition_values[MAX_XCP] = {0};
1562 	int ret, i, num_partitions;
1563 	const char *input_buf = buf;
1564 
1565 	for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
1566 		ret = sscanf(input_buf, "%ld", &partition_values[i]);
1567 		if (ret <= 0)
1568 			break;
1569 
1570 		/* Move the pointer to the next value in the string */
1571 		input_buf = strchr(input_buf, ' ');
1572 		if (input_buf) {
1573 			input_buf++;
1574 		} else {
1575 			i++;
1576 			break;
1577 		}
1578 	}
1579 	num_partitions = i;
1580 
1581 	if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
1582 		return -EINVAL;
1583 
1584 	if (!adev->xcp_mgr && num_partitions != 1)
1585 		return -EINVAL;
1586 
1587 	for (i = 0; i < num_partitions; i++) {
1588 		if (partition_values[i] != 0 && partition_values[i] != 1)
1589 			return -EINVAL;
1590 	}
1591 
1592 	mutex_lock(&adev->enforce_isolation_mutex);
1593 
1594 	for (i = 0; i < num_partitions; i++) {
1595 		if (adev->enforce_isolation[i] && !partition_values[i]) {
1596 			/* Going from enabled to disabled */
1597 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
1598 		} else if (!adev->enforce_isolation[i] && partition_values[i]) {
1599 			/* Going from disabled to enabled */
1600 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
1601 		}
1602 		adev->enforce_isolation[i] = partition_values[i];
1603 	}
1604 
1605 	mutex_unlock(&adev->enforce_isolation_mutex);
1606 
1607 	return count;
1608 }
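
/*
 * Sysfs usage sketch (illustrative; "card0" is just an example node): the
 * store expects one 0/1 flag per partition, space separated, e.g. on a
 * 4-partition device
 *
 *   echo "1 0 0 1" > /sys/class/drm/card0/device/enforce_isolation
 *
 * enables isolation (and reserves a VMID) on partitions 0 and 3 only. A
 * single flag is expected when no partitioning is active.
 */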
1609 
1610 static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
1611 						struct device_attribute *attr,
1612 						char *buf)
1613 {
1614 	struct drm_device *ddev = dev_get_drvdata(dev);
1615 	struct amdgpu_device *adev = drm_to_adev(ddev);
1616 
1617 	if (!adev)
1618 		return -ENODEV;
1619 
1620 	return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
1621 }
1622 
1623 static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
1624 						struct device_attribute *attr,
1625 						char *buf)
1626 {
1627 	struct drm_device *ddev = dev_get_drvdata(dev);
1628 	struct amdgpu_device *adev = drm_to_adev(ddev);
1629 
1630 	if (!adev)
1631 		return -ENODEV;
1632 
1633 	return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
1634 }
1635 
1636 static DEVICE_ATTR(run_cleaner_shader, 0200,
1637 		   NULL, amdgpu_gfx_set_run_cleaner_shader);
1638 
1639 static DEVICE_ATTR(enforce_isolation, 0644,
1640 		   amdgpu_gfx_get_enforce_isolation,
1641 		   amdgpu_gfx_set_enforce_isolation);
1642 
1643 static DEVICE_ATTR(current_compute_partition, 0644,
1644 		   amdgpu_gfx_get_current_compute_partition,
1645 		   amdgpu_gfx_set_compute_partition);
1646 
1647 static DEVICE_ATTR(available_compute_partition, 0444,
1648 		   amdgpu_gfx_get_available_compute_partition, NULL);
1649 static DEVICE_ATTR(gfx_reset_mask, 0444,
1650 		   amdgpu_gfx_get_gfx_reset_mask, NULL);
1651 
1652 static DEVICE_ATTR(compute_reset_mask, 0444,
1653 		   amdgpu_gfx_get_compute_reset_mask, NULL);
1654 
1655 static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
1656 {
1657 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1658 	bool xcp_switch_supported;
1659 	int r;
1660 
1661 	if (!xcp_mgr)
1662 		return 0;
1663 
1664 	xcp_switch_supported =
1665 		(xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1666 
1667 	if (!xcp_switch_supported)
1668 		dev_attr_current_compute_partition.attr.mode &=
1669 			~(S_IWUSR | S_IWGRP | S_IWOTH);
1670 
1671 	r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
1672 	if (r)
1673 		return r;
1674 
1675 	if (xcp_switch_supported)
1676 		r = device_create_file(adev->dev,
1677 				       &dev_attr_available_compute_partition);
1678 
1679 	return r;
1680 }
1681 
1682 static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
1683 {
1684 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1685 	bool xcp_switch_supported;
1686 
1687 	if (!xcp_mgr)
1688 		return;
1689 
1690 	xcp_switch_supported =
1691 		(xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1692 	device_remove_file(adev->dev, &dev_attr_current_compute_partition);
1693 
1694 	if (xcp_switch_supported)
1695 		device_remove_file(adev->dev,
1696 				   &dev_attr_available_compute_partition);
1697 }
1698 
1699 static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
1700 {
1701 	int r;
1702 
1703 	r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
1704 	if (r)
1705 		return r;
1706 	if (adev->gfx.enable_cleaner_shader)
1707 		r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
1708 
1709 	return r;
1710 }
1711 
1712 static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
1713 {
1714 	device_remove_file(adev->dev, &dev_attr_enforce_isolation);
1715 	if (adev->gfx.enable_cleaner_shader)
1716 		device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
1717 }
1718 
1719 static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
1720 {
1721 	int r = 0;
1722 
1723 	if (!amdgpu_gpu_recovery)
1724 		return r;
1725 
1726 	if (adev->gfx.num_gfx_rings) {
1727 		r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
1728 		if (r)
1729 			return r;
1730 	}
1731 
1732 	if (adev->gfx.num_compute_rings) {
1733 		r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
1734 		if (r)
1735 			return r;
1736 	}
1737 
1738 	return r;
1739 }
1740 
1741 static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
1742 {
1743 	if (!amdgpu_gpu_recovery)
1744 		return;
1745 
1746 	if (adev->gfx.num_gfx_rings)
1747 		device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
1748 
1749 	if (adev->gfx.num_compute_rings)
1750 		device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
1751 }
1752 
1753 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
1754 {
1755 	int r;
1756 
1757 	r = amdgpu_gfx_sysfs_xcp_init(adev);
1758 	if (r) {
1759 		dev_err(adev->dev, "failed to create xcp sysfs files\n");
1760 		return r;
1761 	}
1762 
1763 	r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
1764 	if (r)
1765 		dev_err(adev->dev, "failed to create isolation sysfs files\n");
1766 
1767 	r = amdgpu_gfx_sysfs_reset_mask_init(adev);
1768 	if (r)
1769 		dev_err(adev->dev, "failed to create reset mask sysfs files\n");
1770 
1771 	return r;
1772 }
1773 
1774 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
1775 {
1776 	amdgpu_gfx_sysfs_xcp_fini(adev);
1777 	amdgpu_gfx_sysfs_isolation_shader_fini(adev);
1778 	amdgpu_gfx_sysfs_reset_mask_fini(adev);
1779 }
1780 
1781 int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
1782 				      unsigned int cleaner_shader_size)
1783 {
1784 	if (!adev->gfx.enable_cleaner_shader)
1785 		return -EOPNOTSUPP;
1786 
1787 	return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
1788 				       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
1789 				       &adev->gfx.cleaner_shader_obj,
1790 				       &adev->gfx.cleaner_shader_gpu_addr,
1791 				       (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1792 }
1793 
1794 void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
1795 {
1796 	if (!adev->gfx.enable_cleaner_shader)
1797 		return;
1798 
1799 	amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
1800 			      &adev->gfx.cleaner_shader_gpu_addr,
1801 			      (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1802 }
1803 
1804 void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
1805 				    unsigned int cleaner_shader_size,
1806 				    const void *cleaner_shader_ptr)
1807 {
1808 	if (!adev->gfx.enable_cleaner_shader)
1809 		return;
1810 
1811 	if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
1812 		memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
1813 			    cleaner_shader_size);
1814 }
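
/*
 * Illustrative call sequence for the three cleaner shader helpers above
 * (editor's sketch; the gfx_vN_0 names and the shader binary are
 * hypothetical):
 *
 *   static const u32 gfx_vN_0_cleaner_shader_hex[] = { ... };
 *
 *   // in the IP block's sw_init: reserve a page of VRAM/GTT
 *   adev->gfx.enable_cleaner_shader = true;
 *   r = amdgpu_gfx_cleaner_shader_sw_init(adev,
 *                  sizeof(gfx_vN_0_cleaner_shader_hex));
 *
 *   // in hw_init (and again on resume): upload the binary
 *   amdgpu_gfx_cleaner_shader_init(adev,
 *                  sizeof(gfx_vN_0_cleaner_shader_hex),
 *                  gfx_vN_0_cleaner_shader_hex);
 *
 *   // in sw_fini: release the BO
 *   amdgpu_gfx_cleaner_shader_sw_fini(adev);
 */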
1815 
1816 /**
1817  * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Kernel Graphics Driver)
1818  * @adev: amdgpu_device pointer
1819  * @idx: Index of the scheduler to control
1820  * @enable: Whether to enable or disable the KFD scheduler
1821  *
1822  * This function is used to control the KFD (Kernel Fusion Driver) scheduler
1823  * from the KGD. It is part of the cleaner shader feature. This function plays
1824  * a key role in enforcing process isolation on the GPU.
1825  *
1826  * The function uses a reference count mechanism (kfd_sch_req_count) to keep
1827  * track of the number of requests to enable the KFD scheduler. When a request
1828  * to enable the KFD scheduler is made, the reference count is decremented.
1829  * When the reference count reaches zero, a delayed work is scheduled to
1830  * restart the KFD scheduler once the remaining time slice
1831  * (enforce_isolation_time) has elapsed.
1831  *
1832  * When a request to disable the KFD scheduler is made, the function first
1833  * checks if the reference count is zero. If it is, it cancels the delayed work
1834  * for enforcing isolation and checks if the KFD scheduler is active. If the
1835  * KFD scheduler is active, it sends a request to stop the KFD scheduler and
1836  * sets the KFD scheduler state to inactive. Then, it increments the reference
1837  * count.
1838  *
1839  * The function is synchronized using the kfd_sch_mutex to ensure that the KFD
1840  * scheduler state and reference count are updated atomically.
1841  *
1842  * Note: If the reference count is already zero when a request to enable the
1843  * KFD scheduler is made, it means there's an imbalance bug somewhere. The
1844  * function triggers a warning in this case.
1845  */
1846 static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
1847 				    bool enable)
1848 {
1849 	mutex_lock(&adev->gfx.kfd_sch_mutex);
1850 
1851 	if (enable) {
1852 		/* If the count is already 0, it means there's an imbalance bug somewhere.
1853 		 * Note that the bug may be in a different caller than the one which triggers the
1854 		 * WARN_ON_ONCE.
1855 		 */
1856 		if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
1857 			dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
1858 			goto unlock;
1859 		}
1860 
1861 		adev->gfx.kfd_sch_req_count[idx]--;
1862 
1863 		if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
1864 		    adev->gfx.kfd_sch_inactive[idx]) {
1865 			schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
1866 					      msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
1867 		}
1868 	} else {
1869 		if (adev->gfx.kfd_sch_req_count[idx] == 0) {
1870 			cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
1871 			if (!adev->gfx.kfd_sch_inactive[idx]) {
1872 				amdgpu_amdkfd_stop_sched(adev, idx);
1873 				adev->gfx.kfd_sch_inactive[idx] = true;
1874 			}
1875 		}
1876 
1877 		adev->gfx.kfd_sch_req_count[idx]++;
1878 	}
1879 
1880 unlock:
1881 	mutex_unlock(&adev->gfx.kfd_sch_mutex);
1882 }
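
/*
 * Illustrative sequence (editor's sketch), assuming kfd_sch_req_count[idx]
 * starts at 0 with the KFD scheduler running:
 *
 *   kfd_sch_ctrl(false)  // 1st KGD user: count is 0, so stop the KFD
 *                        // scheduler, kfd_sch_inactive = true; count -> 1
 *   kfd_sch_ctrl(false)  // 2nd KGD user: count -> 2, KFD stays stopped
 *   kfd_sch_ctrl(true)   // count 2 -> 1, nothing else happens
 *   kfd_sch_ctrl(true)   // count 1 -> 0: queue the delayed work that will
 *                        // restart the KFD scheduler after the KGD slice
 */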
1883 
1884 /**
1885  * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
1886  *
1887  * @work: work_struct.
1888  *
1889  * This function is the work handler for enforcing shader isolation on AMD GPUs.
1890  * It counts the number of emitted fences for each GFX and compute ring. If any
1891  * fences are still outstanding, it reschedules itself after a short (1 ms)
1892  * delay and checks again. If there are no fences, it signals the Kernel Fusion
1893  * Driver (KFD) to resume the runqueue. The function is synchronized using the
1894  * `enforce_isolation_mutex`.
1895  */
1896 void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
1897 {
1898 	struct amdgpu_isolation_work *isolation_work =
1899 		container_of(work, struct amdgpu_isolation_work, work.work);
1900 	struct amdgpu_device *adev = isolation_work->adev;
1901 	u32 i, idx, fences = 0;
1902 
1903 	if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
1904 		idx = 0;
1905 	else
1906 		idx = isolation_work->xcp_id;
1907 
1908 	if (idx >= MAX_XCP)
1909 		return;
1910 
1911 	mutex_lock(&adev->enforce_isolation_mutex);
1912 	for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
1913 		if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
1914 			fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
1915 	}
1916 	for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
1917 		if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
1918 			fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
1919 	}
1920 	if (fences) {
1921 		/* we've already had our timeslice, so let's wrap this up */
1922 		schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
1923 				      msecs_to_jiffies(1));
1924 	} else {
1925 		/* Tell KFD to resume the runqueue */
1926 		if (adev->kfd.init_complete) {
1927 			WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
1928 			WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
1929 			amdgpu_amdkfd_start_sched(adev, idx);
1930 			adev->gfx.kfd_sch_inactive[idx] = false;
1931 		}
1932 	}
1933 	mutex_unlock(&adev->enforce_isolation_mutex);
1934 }
1935 
1936 static void
1937 amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
1938 					  u32 idx)
1939 {
1940 	unsigned long cjiffies;
1941 	bool wait = false;
1942 
1943 	mutex_lock(&adev->enforce_isolation_mutex);
1944 	if (adev->enforce_isolation[idx]) {
1945 		/* set the initial values if nothing is set */
1946 		if (!adev->gfx.enforce_isolation_jiffies[idx]) {
1947 			adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
1948 			adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
1949 		}
1950 		/* Make sure KFD gets a chance to run */
1951 		if (amdgpu_amdkfd_compute_active(adev, idx)) {
1952 			cjiffies = jiffies;
1953 			if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
1954 				cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
1955 				if (jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS) {
1956 					/* if our time is up, let KGD work drain before scheduling more */
1957 					wait = true;
1958 					/* reset the timer period */
1959 					adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
1960 				} else {
1961 					/* set the timer period to what's left in our time slice */
1962 					adev->gfx.enforce_isolation_time[idx] =
1963 						GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
1964 				}
1965 			} else {
1966 				/* if jiffies wrap around we will just wait a little longer */
1967 				adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
1968 			}
1969 		} else {
1970 			/* if there is no KFD work, then set the full slice period */
1971 			adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
1972 			adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
1973 		}
1974 	}
1975 	mutex_unlock(&adev->enforce_isolation_mutex);
1976 
1977 	if (wait)
1978 		msleep(GFX_SLICE_PERIOD_MS);
1979 }
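
/*
 * Worked example of the slice bookkeeping above (editor's sketch; the real
 * value of GFX_SLICE_PERIOD_MS is defined in the header).  Assume a 10 ms
 * period and active KFD work:
 *
 *   - 4 ms into the slice:  enforce_isolation_time = 10 - 4 = 6 ms, so the
 *     re-enable work queued by amdgpu_gfx_kfd_sch_ctrl() fires when the
 *     current slice ends rather than a full period later.
 *   - 12 ms into the slice: the KGD has overrun its slice, so wait = true
 *     and the caller sleeps for a full period so KFD gets a chance to run.
 */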
1980 
1981 void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
1982 {
1983 	struct amdgpu_device *adev = ring->adev;
1984 	u32 idx;
1985 
1986 	if (!adev->gfx.enable_cleaner_shader)
1987 		return;
1988 
1989 	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
1990 		idx = 0;
1991 	else
1992 		idx = ring->xcp_id;
1993 
1994 	if (idx >= MAX_XCP)
1995 		return;
1996 
1997 	/* Don't submit more work until KFD has had some time */
1998 	amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
1999 
2000 	mutex_lock(&adev->enforce_isolation_mutex);
2001 	if (adev->enforce_isolation[idx]) {
2002 		if (adev->kfd.init_complete)
2003 			amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
2004 	}
2005 	mutex_unlock(&adev->enforce_isolation_mutex);
2006 }
2007 
2008 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
2009 {
2010 	struct amdgpu_device *adev = ring->adev;
2011 	u32 idx;
2012 
2013 	if (!adev->gfx.enable_cleaner_shader)
2014 		return;
2015 
2016 	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2017 		idx = 0;
2018 	else
2019 		idx = ring->xcp_id;
2020 
2021 	if (idx >= MAX_XCP)
2022 		return;
2023 
2024 	mutex_lock(&adev->enforce_isolation_mutex);
2025 	if (adev->enforce_isolation[idx]) {
2026 		if (adev->kfd.init_complete)
2027 			amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
2028 	}
2029 	mutex_unlock(&adev->enforce_isolation_mutex);
2030 }
2031 
2032 /*
2033  * debugfs interface to enable/disable gfx job submission to specific rings.
2034  */
2035 #if defined(CONFIG_DEBUG_FS)
2036 static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
2037 {
2038 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2039 	u32 i;
2040 	u64 mask = 0;
2041 	struct amdgpu_ring *ring;
2042 
2043 	if (!adev)
2044 		return -ENODEV;
2045 
2046 	mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
2047 	if ((val & mask) == 0)
2048 		return -EINVAL;
2049 
2050 	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2051 		ring = &adev->gfx.gfx_ring[i];
2052 		if (val & (1ULL << i))
2053 			ring->sched.ready = true;
2054 		else
2055 			ring->sched.ready = false;
2056 	}
2057 	/* publish the sched.ready flag updates so other CPUs see them */
2058 	smp_wmb();
2059 	return 0;
2060 }
2061 
2062 static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
2063 {
2064 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2065 	u32 i;
2066 	u64 mask = 0;
2067 	struct amdgpu_ring *ring;
2068 
2069 	if (!adev)
2070 		return -ENODEV;
2071 	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2072 		ring = &adev->gfx.gfx_ring[i];
2073 		if (ring->sched.ready)
2074 			mask |= 1ULL << i;
2075 	}
2076 
2077 	*val = mask;
2078 	return 0;
2079 }
2080 
2081 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
2082 			 amdgpu_debugfs_gfx_sched_mask_get,
2083 			 amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
2084 
2085 #endif
2086 
2087 void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
2088 {
2089 #if defined(CONFIG_DEBUG_FS)
2090 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2091 	struct dentry *root = minor->debugfs_root;
2092 
2093 	if (adev->gfx.num_gfx_rings <= 1)
2094 		return;
2095 	debugfs_create_file("amdgpu_gfx_sched_mask", 0600, root, adev,
2096 			    &amdgpu_debugfs_gfx_sched_mask_fops);
2099 #endif
2100 }
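
/*
 * Illustrative usage (editor's sketch; debugfs mount point and DRM minor
 * assumed):
 *
 *   # cat /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask
 *   3                                   <- both gfx rings accept jobs
 *   # echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask
 *                                       <- route new jobs to ring 0 only
 *
 * A write with no bit set for any existing ring is rejected with -EINVAL
 * by amdgpu_debugfs_gfx_sched_mask_set() above.
 */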
2101 
2102 /*
2103  * debugfs interface to enable/disable compute job submission to specific rings.
2104  */
2105 #if defined(CONFIG_DEBUG_FS)
2106 static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
2107 {
2108 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2109 	u32 i;
2110 	u64 mask = 0;
2111 	struct amdgpu_ring *ring;
2112 
2113 	if (!adev)
2114 		return -ENODEV;
2115 
2116 	mask = (1ULL << adev->gfx.num_compute_rings) - 1;
2117 	if ((val & mask) == 0)
2118 		return -EINVAL;
2119 
2120 	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2121 		ring = &adev->gfx.compute_ring[i];
2122 		if (val & (1ULL << i))
2123 			ring->sched.ready = true;
2124 		else
2125 			ring->sched.ready = false;
2126 	}
2127 
2128 	/* publish the sched.ready flag updates so other CPUs see them */
2129 	smp_wmb();
2130 	return 0;
2131 }
2132 
2133 static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
2134 {
2135 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2136 	u32 i;
2137 	u64 mask = 0;
2138 	struct amdgpu_ring *ring;
2139 
2140 	if (!adev)
2141 		return -ENODEV;
2142 	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2143 		ring = &adev->gfx.compute_ring[i];
2144 		if (ring->sched.ready)
2145 			mask |= 1ULL << i;
2146 	}
2147 
2148 	*val = mask;
2149 	return 0;
2150 }
2151 
2152 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
2153 			 amdgpu_debugfs_compute_sched_mask_get,
2154 			 amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
2155 
2156 #endif
2157 
2158 void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
2159 {
2160 #if defined(CONFIG_DEBUG_FS)
2161 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2162 	struct dentry *root = minor->debugfs_root;
2163 
2164 	if (adev->gfx.num_compute_rings <= 1)
2165 		return;
2166 	debugfs_create_file("amdgpu_compute_sched_mask", 0600, root, adev,
2167 			    &amdgpu_debugfs_compute_sched_mask_fops);
2170 #endif
2171 }
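
/*
 * The compute mask works the same way (editor's sketch, path assumed): with
 * eight compute rings, writing 0xf keeps rings 0-3 schedulable and marks
 * rings 4-7 as not ready:
 *
 *   # echo 0xf > /sys/kernel/debug/dri/0/amdgpu_compute_sched_mask
 */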
2172