xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c (revision c7062be3380cb20c8b1c4a935a13f1848ead0719)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <linux/pm_runtime.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_gfx.h"
31 #include "amdgpu_rlc.h"
32 #include "amdgpu_ras.h"
33 #include "amdgpu_reset.h"
34 #include "amdgpu_xcp.h"
35 #include "amdgpu_xgmi.h"
36 #include "amdgpu_mes.h"
37 #include "nvd.h"
38 
39 /* delay 0.1 second to enable gfx off feature */
40 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
41 
42 #define GFX_OFF_NO_DELAY 0
43 
44 /*
45  * GPU GFX IP block helper functions.
46  */
47 
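/*
 * Map a (mec, pipe, queue) triple to a flat bit index in the per-XCC MEC
 * queue bitmap: queues are contiguous within a pipe, pipes within a MEC.
 */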
48 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
49 				int pipe, int queue)
50 {
51 	int bit = 0;
52 
53 	bit += mec * adev->gfx.mec.num_pipe_per_mec
54 		* adev->gfx.mec.num_queue_per_pipe;
55 	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
56 	bit += queue;
57 
58 	return bit;
59 }
60 
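/*
 * Inverse of amdgpu_gfx_mec_queue_to_bit(): decompose a flat queue-bitmap
 * bit index back into its (mec, pipe, queue) coordinates.
 */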
61 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
62 				 int *mec, int *pipe, int *queue)
63 {
64 	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
65 	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
66 		% adev->gfx.mec.num_pipe_per_mec;
67 	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
68 	       / adev->gfx.mec.num_pipe_per_mec;
69 
70 }
71 
72 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
73 				     int xcc_id, int mec, int pipe, int queue)
74 {
75 	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
76 			adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
77 }
78 
79 static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
80 				      int me, int pipe, int queue)
81 {
82 	int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
83 	int bit = 0;
84 
85 	bit += me * adev->gfx.me.num_pipe_per_me
86 		* num_queue_per_pipe;
87 	bit += pipe * num_queue_per_pipe;
88 	bit += queue;
89 
90 	return bit;
91 }
92 
93 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
94 				    int me, int pipe, int queue)
95 {
96 	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
97 			adev->gfx.me.queue_bitmap);
98 }
99 
100 /**
101  * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
102  *
103  * @mask: array in which the per-shader array disable masks will be stored
104  * @max_se: number of SEs
105  * @max_sh: number of SHs
106  *
107  * The bitmask of CUs to be disabled in the shader array determined by se and
108  * sh is stored in mask[se * max_sh + sh].
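 *
 * The parameter is a comma-separated list of se.sh.cu triples, e.g.
 * disable_cu=1.0.3,2.1.7 disables CU 3 in SE1/SH0 and CU 7 in SE2/SH1.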
109  */
110 void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
111 {
112 	unsigned int se, sh, cu;
113 	const char *p;
114 
115 	memset(mask, 0, sizeof(*mask) * max_se * max_sh);
116 
117 	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
118 		return;
119 
120 	p = amdgpu_disable_cu;
121 	for (;;) {
122 		char *next;
123 		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
124 
125 		if (ret < 3) {
126 			DRM_ERROR("amdgpu: could not parse disable_cu\n");
127 			return;
128 		}
129 
130 		if (se < max_se && sh < max_sh && cu < 16) {
131 			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
132 			mask[se * max_sh + sh] |= 1u << cu;
133 		} else {
134 			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
135 				  se, sh, cu);
136 		}
137 
138 		next = strchr(p, ',');
139 		if (!next)
140 			break;
141 		p = next + 1;
142 	}
143 }
144 
145 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
146 {
147 	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
148 }
149 
150 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
151 {
152 	if (amdgpu_compute_multipipe != -1) {
153 		dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n",
154 			 amdgpu_compute_multipipe);
155 		return amdgpu_compute_multipipe == 1;
156 	}
157 
158 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
159 		return true;
160 
161 	/* FIXME: spreading the queues across pipes causes perf regressions
162 	 * on POLARIS11 compute workloads */
163 	if (adev->asic_type == CHIP_POLARIS11)
164 		return false;
165 
166 	return adev->gfx.mec.num_mec > 1;
167 }
168 
169 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
170 						struct amdgpu_ring *ring)
171 {
172 	int queue = ring->queue;
173 	int pipe = ring->pipe;
174 
175 	/* Policy: use pipe1 queue0 as high priority graphics queue if we
176 	 * have more than one gfx pipe.
177 	 */
178 	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
179 	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
180 		int me = ring->me;
181 		int bit;
182 
183 		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
184 		if (ring == &adev->gfx.gfx_ring[bit])
185 			return true;
186 	}
187 
188 	return false;
189 }
190 
191 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
192 					       struct amdgpu_ring *ring)
193 {
194 	/* Policy: use 1st queue as high priority compute queue if we
195 	 * have more than one compute queue.
196 	 */
197 	if (adev->gfx.num_compute_rings > 1 &&
198 	    ring == &adev->gfx.compute_ring[0])
199 		return true;
200 
201 	return false;
202 }
203 
204 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
205 {
206 	int i, j, queue, pipe;
207 	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
208 	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
209 				     adev->gfx.mec.num_queue_per_pipe,
210 				     adev->gfx.num_compute_rings);
211 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
212 
213 	if (multipipe_policy) {
214 		/* policy: spread queues evenly across all pipes on MEC1 only;
215 		 * for multiple XCCs, just reuse the original policy for simplicity */
216 		for (j = 0; j < num_xcc; j++) {
217 			for (i = 0; i < max_queues_per_mec; i++) {
218 				pipe = i % adev->gfx.mec.num_pipe_per_mec;
219 				queue = (i / adev->gfx.mec.num_pipe_per_mec) %
220 					 adev->gfx.mec.num_queue_per_pipe;
221 
222 				set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
223 					adev->gfx.mec_bitmap[j].queue_bitmap);
224 			}
225 		}
226 	} else {
227 		/* policy: amdgpu owns all queues in the given pipe */
228 		for (j = 0; j < num_xcc; j++) {
229 			for (i = 0; i < max_queues_per_mec; ++i)
230 				set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
231 		}
232 	}
233 
234 	for (j = 0; j < num_xcc; j++) {
235 		dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
236 			bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
237 	}
238 }
239 
240 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
241 {
242 	int i, queue, pipe;
243 	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
244 	int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
245 	int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
246 
247 	if (multipipe_policy) {
248 		/* policy: amdgpu owns the first queue per pipe at this stage;
249 		 * this will be extended to multiple queues per pipe later */
250 		for (i = 0; i < max_queues_per_me; i++) {
251 			pipe = i % adev->gfx.me.num_pipe_per_me;
252 			queue = (i / adev->gfx.me.num_pipe_per_me) %
253 				num_queue_per_pipe;
254 
255 			set_bit(pipe * num_queue_per_pipe + queue,
256 				adev->gfx.me.queue_bitmap);
257 		}
258 	} else {
259 		for (i = 0; i < max_queues_per_me; ++i)
260 			set_bit(i, adev->gfx.me.queue_bitmap);
261 	}
262 
263 	/* update the number of active graphics rings */
264 	if (adev->gfx.num_gfx_rings)
265 		adev->gfx.num_gfx_rings =
266 			bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
267 }
268 
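/*
 * Pick a compute queue for the KIQ ring: walk the queue bitmap from the top
 * and take the first queue not owned by amdgpu's KCQs, subject to the
 * constraints noted below.
 */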
269 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
270 				  struct amdgpu_ring *ring, int xcc_id)
271 {
272 	int queue_bit;
273 	int mec, pipe, queue;
274 
275 	queue_bit = adev->gfx.mec.num_mec
276 		    * adev->gfx.mec.num_pipe_per_mec
277 		    * adev->gfx.mec.num_queue_per_pipe;
278 
279 	while (--queue_bit >= 0) {
280 		if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
281 			continue;
282 
283 		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
284 
285 		/*
286 		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
287 		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
288 		 * can only be issued on queue 0.
289 		 */
290 		if ((mec == 1 && pipe > 1) || queue != 0)
291 			continue;
292 
293 		ring->me = mec + 1;
294 		ring->pipe = pipe;
295 		ring->queue = queue;
296 
297 		return 0;
298 	}
299 
300 	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
301 	return -EINVAL;
302 }
303 
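/*
 * Set up the KIQ ring for the given XCC: assign its doorbell, reserve a free
 * MEC queue for it and initialize the ring with no scheduler attached.
 */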
304 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
305 {
306 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
307 	struct amdgpu_irq_src *irq = &kiq->irq;
308 	struct amdgpu_ring *ring = &kiq->ring;
309 	int r = 0;
310 
311 	spin_lock_init(&kiq->ring_lock);
312 
313 	ring->adev = NULL;
314 	ring->ring_obj = NULL;
315 	ring->use_doorbell = true;
316 	ring->xcc_id = xcc_id;
317 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
318 	ring->doorbell_index =
319 		(adev->doorbell_index.kiq +
320 		 xcc_id * adev->doorbell_index.xcc_doorbell_range)
321 		<< 1;
322 
323 	r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
324 	if (r)
325 		return r;
326 
327 	ring->eop_gpu_addr = kiq->eop_gpu_addr;
328 	ring->no_scheduler = true;
329 	snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu",
330 		 (unsigned char)xcc_id, (unsigned char)ring->me,
331 		 (unsigned char)ring->pipe, (unsigned char)ring->queue);
332 	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
333 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
334 	if (r)
335 		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
336 
337 	return r;
338 }
339 
340 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
341 {
342 	amdgpu_ring_fini(ring);
343 }
344 
345 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
346 {
347 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
348 
349 	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
350 }
351 
352 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
353 			unsigned int hpd_size, int xcc_id)
354 {
355 	int r;
356 	u32 *hpd;
357 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
358 
359 	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
360 				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
361 				    &kiq->eop_gpu_addr, (void **)&hpd);
362 	if (r) {
363 		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
364 		return r;
365 	}
366 
367 	memset(hpd, 0, hpd_size);
368 
369 	r = amdgpu_bo_reserve(kiq->eop_obj, true);
370 	if (unlikely(r != 0))
371 		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
372 	amdgpu_bo_kunmap(kiq->eop_obj);
373 	amdgpu_bo_unreserve(kiq->eop_obj);
374 
375 	return 0;
376 }
377 
378 /* create MQD for each compute/gfx queue */
379 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
380 			   unsigned int mqd_size, int xcc_id)
381 {
382 	int r, i, j;
383 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
384 	struct amdgpu_ring *ring = &kiq->ring;
385 	u32 domain = AMDGPU_GEM_DOMAIN_GTT;
386 
387 #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
388 	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
389 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
390 		domain |= AMDGPU_GEM_DOMAIN_VRAM;
391 #endif
392 
393 	/* create MQD for KIQ */
394 	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
395 		/* Originally the KIQ MQD was placed in the GTT domain, but for SR-IOV the VRAM
396 		 * domain is a must: otherwise the hypervisor's SAVE_VF fails after the driver is
397 		 * unloaded, since the MQD has been deallocated and GART-unbound. To avoid that
398 		 * divergence, use the VRAM domain for the KIQ MQD on both SR-IOV and bare metal.
399 		 */
400 		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
401 					    AMDGPU_GEM_DOMAIN_VRAM |
402 					    AMDGPU_GEM_DOMAIN_GTT,
403 					    &ring->mqd_obj,
404 					    &ring->mqd_gpu_addr,
405 					    &ring->mqd_ptr);
406 		if (r) {
407 			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
408 			return r;
409 		}
410 
411 		/* prepare MQD backup */
412 		kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
413 		if (!kiq->mqd_backup) {
414 			dev_warn(adev->dev,
415 				 "no memory to create MQD backup for ring %s\n", ring->name);
416 			return -ENOMEM;
417 		}
418 	}
419 
420 	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
421 		/* create MQD for each KGQ */
422 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
423 			ring = &adev->gfx.gfx_ring[i];
424 			if (!ring->mqd_obj) {
425 				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
426 							    domain, &ring->mqd_obj,
427 							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
428 				if (r) {
429 					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
430 					return r;
431 				}
432 
433 				ring->mqd_size = mqd_size;
434 				/* prepare MQD backup */
435 				adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
436 				if (!adev->gfx.me.mqd_backup[i]) {
437 					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
438 					return -ENOMEM;
439 				}
440 			}
441 		}
442 	}
443 
444 	/* create MQD for each KCQ */
445 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
446 		j = i + xcc_id * adev->gfx.num_compute_rings;
447 		ring = &adev->gfx.compute_ring[j];
448 		if (!ring->mqd_obj) {
449 			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
450 						    domain, &ring->mqd_obj,
451 						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
452 			if (r) {
453 				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
454 				return r;
455 			}
456 
457 			ring->mqd_size = mqd_size;
458 			/* prepare MQD backup */
459 			adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL);
460 			if (!adev->gfx.mec.mqd_backup[j]) {
461 				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
462 				return -ENOMEM;
463 			}
464 		}
465 	}
466 
467 	return 0;
468 }
469 
470 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
471 {
472 	struct amdgpu_ring *ring = NULL;
473 	int i, j;
474 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
475 
476 	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
477 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
478 			ring = &adev->gfx.gfx_ring[i];
479 			kfree(adev->gfx.me.mqd_backup[i]);
480 			amdgpu_bo_free_kernel(&ring->mqd_obj,
481 					      &ring->mqd_gpu_addr,
482 					      &ring->mqd_ptr);
483 		}
484 	}
485 
486 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
487 		j = i + xcc_id * adev->gfx.num_compute_rings;
488 		ring = &adev->gfx.compute_ring[j];
489 		kfree(adev->gfx.mec.mqd_backup[j]);
490 		amdgpu_bo_free_kernel(&ring->mqd_obj,
491 				      &ring->mqd_gpu_addr,
492 				      &ring->mqd_ptr);
493 	}
494 
495 	ring = &kiq->ring;
496 	kfree(kiq->mqd_backup);
497 	amdgpu_bo_free_kernel(&ring->mqd_obj,
498 			      &ring->mqd_gpu_addr,
499 			      &ring->mqd_ptr);
500 }
501 
502 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
503 {
504 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
505 	struct amdgpu_ring *kiq_ring = &kiq->ring;
506 	int i, r = 0;
507 	int j;
508 
509 	if (adev->enable_mes) {
510 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
511 			j = i + xcc_id * adev->gfx.num_compute_rings;
512 			amdgpu_mes_unmap_legacy_queue(adev,
513 						   &adev->gfx.compute_ring[j],
514 						   RESET_QUEUES, 0, 0, xcc_id);
515 		}
516 		return 0;
517 	}
518 
519 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
520 		return -EINVAL;
521 
522 	if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
523 		return 0;
524 
525 	spin_lock(&kiq->ring_lock);
526 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
527 					adev->gfx.num_compute_rings)) {
528 		spin_unlock(&kiq->ring_lock);
529 		return -ENOMEM;
530 	}
531 
532 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
533 		j = i + xcc_id * adev->gfx.num_compute_rings;
534 		kiq->pmf->kiq_unmap_queues(kiq_ring,
535 					   &adev->gfx.compute_ring[j],
536 					   RESET_QUEUES, 0, 0);
537 	}
538 	/* Submit unmap queue packet */
539 	amdgpu_ring_commit(kiq_ring);
540 	/*
541 	 * Ring test will do a basic scratch register change check. Just run
542 	 * this to ensure that the unmap queue packets submitted above have
543 	 * been processed successfully before returning.
544 	 */
545 	r = amdgpu_ring_test_helper(kiq_ring);
546 
547 	spin_unlock(&kiq->ring_lock);
548 
549 	return r;
550 }
551 
552 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
553 {
554 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
555 	struct amdgpu_ring *kiq_ring = &kiq->ring;
556 	int i, r = 0;
557 	int j;
558 
559 	if (adev->enable_mes) {
560 		if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
561 			for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
562 				j = i + xcc_id * adev->gfx.num_gfx_rings;
563 				amdgpu_mes_unmap_legacy_queue(adev,
564 						      &adev->gfx.gfx_ring[j],
565 						      PREEMPT_QUEUES, 0, 0, xcc_id);
566 			}
567 		}
568 		return 0;
569 	}
570 
571 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
572 		return -EINVAL;
573 
574 	if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
575 		return 0;
576 
577 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
578 		spin_lock(&kiq->ring_lock);
579 		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
580 						adev->gfx.num_gfx_rings)) {
581 			spin_unlock(&kiq->ring_lock);
582 			return -ENOMEM;
583 		}
584 
585 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
586 			j = i + xcc_id * adev->gfx.num_gfx_rings;
587 			kiq->pmf->kiq_unmap_queues(kiq_ring,
588 						   &adev->gfx.gfx_ring[j],
589 						   PREEMPT_QUEUES, 0, 0);
590 		}
591 		/* Submit unmap queue packet */
592 		amdgpu_ring_commit(kiq_ring);
593 
594 		/*
595 		 * Ring test will do a basic scratch register change check.
596 		 * Just run this to ensure that the unmap queue packets submitted
597 		 * above have been processed successfully before returning.
598 		 */
599 		r = amdgpu_ring_test_helper(kiq_ring);
600 		spin_unlock(&kiq->ring_lock);
601 	}
602 
603 	return r;
604 }
605 
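/*
 * Convert a driver queue-bitmap bit into the bit position expected by the
 * KIQ SET_RESOURCES packet, which assumes 4 pipes per MEC and 8 queues per
 * pipe regardless of the actual hardware configuration.
 */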
606 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
607 					int queue_bit)
608 {
609 	int mec, pipe, queue;
610 	int set_resource_bit = 0;
611 
612 	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
613 
614 	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
615 
616 	return set_resource_bit;
617 }
618 
619 static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
620 {
621 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
622 	struct amdgpu_ring *kiq_ring = &kiq->ring;
623 	uint64_t queue_mask = ~0ULL;
624 	int r, i, j;
625 
626 	amdgpu_device_flush_hdp(adev, NULL);
627 
628 	if (!adev->enable_uni_mes) {
629 		spin_lock(&kiq->ring_lock);
630 		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
631 		if (r) {
632 			dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
633 			spin_unlock(&kiq->ring_lock);
634 			return r;
635 		}
636 
637 		kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
638 		r = amdgpu_ring_test_helper(kiq_ring);
639 		spin_unlock(&kiq->ring_lock);
640 		if (r)
641 			dev_err(adev->dev, "KIQ failed to set resources\n");
642 	}
643 
644 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
645 		j = i + xcc_id * adev->gfx.num_compute_rings;
646 		r = amdgpu_mes_map_legacy_queue(adev,
647 						&adev->gfx.compute_ring[j],
648 						xcc_id);
649 		if (r) {
650 			dev_err(adev->dev, "failed to map compute queue\n");
651 			return r;
652 		}
653 	}
654 
655 	return 0;
656 }
657 
658 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
659 {
660 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
661 	struct amdgpu_ring *kiq_ring = &kiq->ring;
662 	uint64_t queue_mask = 0;
663 	int r, i, j;
664 
665 	if (adev->mes.enable_legacy_queue_map)
666 		return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
667 
668 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
669 		return -EINVAL;
670 
671 	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
672 		if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
673 			continue;
674 
675 		/* This situation may be hit in the future if a new HW
676 		 * generation exposes more than 64 queues. If so, the
677 		 * definition of queue_mask needs updating. */
678 		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
679 			dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
680 			break;
681 		}
682 
683 		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
684 	}
685 
686 	amdgpu_device_flush_hdp(adev, NULL);
687 
688 	dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
689 		 kiq_ring->pipe, kiq_ring->queue);
690 
691 	spin_lock(&kiq->ring_lock);
692 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
693 					adev->gfx.num_compute_rings +
694 					kiq->pmf->set_resources_size);
695 	if (r) {
696 		dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
697 		spin_unlock(&kiq->ring_lock);
698 		return r;
699 	}
700 
701 	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
702 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
703 		j = i + xcc_id * adev->gfx.num_compute_rings;
704 		kiq->pmf->kiq_map_queues(kiq_ring,
705 					 &adev->gfx.compute_ring[j]);
706 	}
707 	/* Submit map queue packet */
708 	amdgpu_ring_commit(kiq_ring);
709 	/*
710 	 * Ring test will do a basic scratch register change check. Just run
711 	 * this to ensure that the map queue packets submitted above have
712 	 * been processed successfully before returning.
713 	 */
714 	r = amdgpu_ring_test_helper(kiq_ring);
715 	spin_unlock(&kiq->ring_lock);
716 	if (r)
717 		dev_err(adev->dev, "KCQ enable failed\n");
718 
719 	return r;
720 }
721 
722 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
723 {
724 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
725 	struct amdgpu_ring *kiq_ring = &kiq->ring;
726 	int r, i, j;
727 
728 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
729 		return -EINVAL;
730 
731 	amdgpu_device_flush_hdp(adev, NULL);
732 
733 	if (adev->mes.enable_legacy_queue_map) {
734 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
735 			j = i + xcc_id * adev->gfx.num_gfx_rings;
736 			r = amdgpu_mes_map_legacy_queue(adev,
737 							&adev->gfx.gfx_ring[j],
738 							xcc_id);
739 			if (r) {
740 				dev_err(adev->dev, "failed to map gfx queue\n");
741 				return r;
742 			}
743 		}
744 
745 		return 0;
746 	}
747 
748 	spin_lock(&kiq->ring_lock);
749 	/* No need to map kgq on the slave */
750 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
751 		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
752 						adev->gfx.num_gfx_rings);
753 		if (r) {
754 			dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
755 			spin_unlock(&kiq->ring_lock);
756 			return r;
757 		}
758 
759 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
760 			j = i + xcc_id * adev->gfx.num_gfx_rings;
761 			kiq->pmf->kiq_map_queues(kiq_ring,
762 						 &adev->gfx.gfx_ring[j]);
763 		}
764 	}
765 	/* Submit map queue packet */
766 	amdgpu_ring_commit(kiq_ring);
767 	/*
768 	 * Ring test will do a basic scratch register change check. Just run
769 	 * this to ensure that the map queue packets submitted above have
770 	 * been processed successfully before returning.
771 	 */
772 	r = amdgpu_ring_test_helper(kiq_ring);
773 	spin_unlock(&kiq->ring_lock);
774 	if (r)
775 		dev_err(adev->dev, "KGQ enable failed\n");
776 
777 	return r;
778 }
779 
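/*
 * Common GFXOFF control path: @enable == true drops one outstanding disable
 * request and, once the request count reaches zero, allows GFXOFF (either
 * immediately or after GFX_OFF_DELAY_ENABLE); @enable == false takes a
 * disable request and forces GFXOFF off right away if it was allowed.
 */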
780 static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable,
781 				   bool no_delay)
782 {
783 	unsigned long delay = GFX_OFF_DELAY_ENABLE;
784 
785 	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
786 		return;
787 
788 	mutex_lock(&adev->gfx.gfx_off_mutex);
789 
790 	if (enable) {
791 		/* If the count is already 0, it means there's an imbalance bug somewhere.
792 		 * Note that the bug may be in a different caller than the one which triggers the
793 		 * WARN_ON_ONCE.
794 		 */
795 		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
796 			goto unlock;
797 
798 		adev->gfx.gfx_off_req_count--;
799 
800 		if (adev->gfx.gfx_off_req_count == 0 &&
801 		    !adev->gfx.gfx_off_state) {
802 			/* If going to s2idle, no need to wait */
803 			if (no_delay) {
804 				if (!amdgpu_dpm_set_powergating_by_smu(adev,
805 						AMD_IP_BLOCK_TYPE_GFX, true, 0))
806 					adev->gfx.gfx_off_state = true;
807 			} else {
808 				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
809 					      delay);
810 			}
811 		}
812 	} else {
813 		if (adev->gfx.gfx_off_req_count == 0) {
814 			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
815 
816 			if (adev->gfx.gfx_off_state &&
817 			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
818 				adev->gfx.gfx_off_state = false;
819 
820 				if (adev->gfx.funcs->init_spm_golden) {
821 					dev_dbg(adev->dev,
822 						"GFXOFF is disabled, re-init SPM golden settings\n");
823 					amdgpu_gfx_init_spm_golden(adev);
824 				}
825 			}
826 		}
827 
828 		adev->gfx.gfx_off_req_count++;
829 	}
830 
831 unlock:
832 	mutex_unlock(&adev->gfx.gfx_off_mutex);
833 }
834 
835 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
836  *
837  * @adev: amdgpu_device pointer
838  * @enable: true: enable the gfx off feature, false: disable the gfx off feature
839  *
840  * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
841  * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
842  * 3. Other clients can cancel their requests to disable the gfx off feature.
843  * 4. Other clients should not request to enable the gfx off feature before disabling it.
844  *
845  * Allowing gfx off is delayed by GFX_OFF_DELAY_ENABLE (100 ms).
846  */
847 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
848 {
849 	/* If going to s2idle, no need to wait */
850 	bool no_delay = adev->in_s0ix;
851 
852 	amdgpu_gfx_do_off_ctrl(adev, enable, no_delay);
853 }
854 
855 /* amdgpu_gfx_off_ctrl_immediate - Handle gfx off feature enable/disable
856  *
857  * @adev: amdgpu_device pointer
858  * @enable: true: enable the gfx off feature, false: disable the gfx off feature
859  *
860  * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
861  * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
862  * 3. Other clients can cancel their requests to disable the gfx off feature.
863  * 4. Other clients should not request to enable the gfx off feature before disabling it.
864  *
865  * The gfx off allow is issued immediately (no delay).
866  */
867 void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable)
868 {
869 	amdgpu_gfx_do_off_ctrl(adev, enable, true);
870 }
871 
872 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
873 {
874 	int r = 0;
875 
876 	mutex_lock(&adev->gfx.gfx_off_mutex);
877 
878 	r = amdgpu_dpm_set_residency_gfxoff(adev, value);
879 
880 	mutex_unlock(&adev->gfx.gfx_off_mutex);
881 
882 	return r;
883 }
884 
885 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
886 {
887 	int r = 0;
888 
889 	mutex_lock(&adev->gfx.gfx_off_mutex);
890 
891 	r = amdgpu_dpm_get_residency_gfxoff(adev, value);
892 
893 	mutex_unlock(&adev->gfx.gfx_off_mutex);
894 
895 	return r;
896 }
897 
898 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
899 {
900 	int r = 0;
901 
902 	mutex_lock(&adev->gfx.gfx_off_mutex);
903 
904 	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
905 
906 	mutex_unlock(&adev->gfx.gfx_off_mutex);
907 
908 	return r;
909 }
910 
911 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
912 {
913 
914 	int r = 0;
915 
916 	mutex_lock(&adev->gfx.gfx_off_mutex);
917 
918 	r = amdgpu_dpm_get_status_gfxoff(adev, value);
919 
920 	mutex_unlock(&adev->gfx.gfx_off_mutex);
921 
922 	return r;
923 }
924 
925 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
926 {
927 	int r;
928 
929 	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
930 		if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
931 			r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
932 			if (r)
933 				return r;
934 		}
935 
936 		r = amdgpu_ras_block_late_init(adev, ras_block);
937 		if (r)
938 			return r;
939 
940 		if (amdgpu_sriov_vf(adev))
941 			return r;
942 
943 		if (adev->gfx.cp_ecc_error_irq.funcs) {
944 			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
945 			if (r)
946 				goto late_fini;
947 		}
948 	} else {
949 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
950 	}
951 
952 	return 0;
953 late_fini:
954 	amdgpu_ras_block_late_fini(adev, ras_block);
955 	return r;
956 }
957 
958 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
959 {
960 	int err = 0;
961 	struct amdgpu_gfx_ras *ras = NULL;
962 
963 	/* If adev->gfx.ras is NULL, gfx does not support the RAS
964 	 * function, so do nothing here.
965 	 */
966 	if (!adev->gfx.ras)
967 		return 0;
968 
969 	ras = adev->gfx.ras;
970 
971 	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
972 	if (err) {
973 		dev_err(adev->dev, "Failed to register gfx ras block!\n");
974 		return err;
975 	}
976 
977 	strcpy(ras->ras_block.ras_comm.name, "gfx");
978 	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
979 	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
980 	adev->gfx.ras_if = &ras->ras_block.ras_comm;
981 
982 	/* If no IP-specific ras_late_init function is defined, use the default gfx ras_late_init */
983 	if (!ras->ras_block.ras_late_init)
984 		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
985 
986 	/* If no IP-specific ras_cb function is defined, use the default ras_cb */
987 	if (!ras->ras_block.ras_cb)
988 		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
989 
990 	return 0;
991 }
992 
993 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
994 						struct amdgpu_iv_entry *entry)
995 {
996 	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
997 		return adev->gfx.ras->poison_consumption_handler(adev, entry);
998 
999 	return 0;
1000 }
1001 
1002 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
1003 		void *err_data,
1004 		struct amdgpu_iv_entry *entry)
1005 {
1006 	/* TODO: a UE will trigger an interrupt.
1007 	 *
1008 	 * When "Full RAS" is enabled, the per-IP interrupt sources should
1009 	 * be disabled and the driver should only look for the aggregated
1010 	 * interrupt via sync flood.
1011 	 */
1012 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
1013 		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
1014 		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
1015 		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
1016 			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1017 		amdgpu_ras_reset_gpu(adev);
1018 	}
1019 	return AMDGPU_RAS_SUCCESS;
1020 }
1021 
1022 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
1023 				  struct amdgpu_irq_src *source,
1024 				  struct amdgpu_iv_entry *entry)
1025 {
1026 	struct ras_common_if *ras_if = adev->gfx.ras_if;
1027 	struct ras_dispatch_if ih_data = {
1028 		.entry = entry,
1029 	};
1030 
1031 	if (!ras_if)
1032 		return 0;
1033 
1034 	ih_data.head = *ras_if;
1035 
1036 	dev_err(adev->dev, "CP ECC ERROR IRQ\n");
1037 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1038 	return 0;
1039 }
1040 
1041 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
1042 		void *ras_error_status,
1043 		void (*func)(struct amdgpu_device *adev, void *ras_error_status,
1044 				int xcc_id))
1045 {
1046 	int i;
1047 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
1048 	uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
1049 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
1050 
1051 	if (err_data) {
1052 		err_data->ue_count = 0;
1053 		err_data->ce_count = 0;
1054 	}
1055 
1056 	for_each_inst(i, xcc_mask)
1057 		func(adev, ras_error_status, i);
1058 }
1059 
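/*
 * Read a register indirectly through a KIQ packet (or via MES when its ring
 * is ready) and poll the fence for completion; used when direct MMIO access
 * is not appropriate, e.g. under SR-IOV.
 */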
1060 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
1061 {
1062 	signed long r, cnt = 0;
1063 	unsigned long flags;
1064 	uint32_t seq, reg_val_offs = 0, value = 0;
1065 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1066 	struct amdgpu_ring *ring = &kiq->ring;
1067 
1068 	if (amdgpu_device_skip_hw_access(adev))
1069 		return 0;
1070 
1071 	if (adev->mes.ring[0].sched.ready)
1072 		return amdgpu_mes_rreg(adev, reg, xcc_id);
1073 
1074 	BUG_ON(!ring->funcs->emit_rreg);
1075 
1076 	spin_lock_irqsave(&kiq->ring_lock, flags);
1077 	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
1078 		pr_err("critical bug! too many kiq readers\n");
1079 		goto failed_unlock;
1080 	}
1081 	r = amdgpu_ring_alloc(ring, 32);
1082 	if (r)
1083 		goto failed_unlock;
1084 
1085 	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
1086 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1087 	if (r)
1088 		goto failed_undo;
1089 
1090 	amdgpu_ring_commit(ring);
1091 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1092 
1093 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1094 
1095 	/* Don't wait any longer in the GPU reset case, because doing so may
1096 	 * block the gpu_recover() routine forever: e.g. this virt_kiq_rreg
1097 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
1098 	 * never return if we keep waiting in virt_kiq_rreg, which causes
1099 	 * gpu_recover() to hang there.
1100 	 *
1101 	 * Also don't wait any longer when called from IRQ context.
1102 	 */
1103 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1104 		goto failed_kiq_read;
1105 
1106 	might_sleep();
1107 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1108 		if (amdgpu_in_reset(adev))
1109 			goto failed_kiq_read;
1110 
1111 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1112 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1113 	}
1114 
1115 	if (cnt > MAX_KIQ_REG_TRY)
1116 		goto failed_kiq_read;
1117 
1118 	mb();
1119 	value = adev->wb.wb[reg_val_offs];
1120 	amdgpu_device_wb_free(adev, reg_val_offs);
1121 	return value;
1122 
1123 failed_undo:
1124 	amdgpu_ring_undo(ring);
1125 failed_unlock:
1126 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1127 failed_kiq_read:
1128 	if (reg_val_offs)
1129 		amdgpu_device_wb_free(adev, reg_val_offs);
1130 	dev_err(adev->dev, "failed to read reg:%x\n", reg);
1131 	return ~0;
1132 }
1133 
1134 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
1135 {
1136 	signed long r, cnt = 0;
1137 	unsigned long flags;
1138 	uint32_t seq;
1139 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1140 	struct amdgpu_ring *ring = &kiq->ring;
1141 
1142 	BUG_ON(!ring->funcs->emit_wreg);
1143 
1144 	if (amdgpu_device_skip_hw_access(adev))
1145 		return;
1146 
1147 	if (adev->mes.ring[0].sched.ready) {
1148 		amdgpu_mes_wreg(adev, reg, v, xcc_id);
1149 		return;
1150 	}
1151 
1152 	spin_lock_irqsave(&kiq->ring_lock, flags);
1153 	r = amdgpu_ring_alloc(ring, 32);
1154 	if (r)
1155 		goto failed_unlock;
1156 
1157 	amdgpu_ring_emit_wreg(ring, reg, v);
1158 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1159 	if (r)
1160 		goto failed_undo;
1161 
1162 	amdgpu_ring_commit(ring);
1163 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1164 
1165 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1166 
1167 	/* Don't wait any longer in the GPU reset case, because doing so may
1168 	 * block the gpu_recover() routine forever: e.g. this virt_kiq_rreg
1169 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
1170 	 * never return if we keep waiting in virt_kiq_rreg, which causes
1171 	 * gpu_recover() to hang there.
1172 	 *
1173 	 * Also don't wait any longer when called from IRQ context.
1174 	 */
1175 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1176 		goto failed_kiq_write;
1177 
1178 	might_sleep();
1179 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1180 		if (amdgpu_in_reset(adev))
1181 			goto failed_kiq_write;
1182 
1183 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1184 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1185 	}
1186 
1187 	if (cnt > MAX_KIQ_REG_TRY)
1188 		goto failed_kiq_write;
1189 
1190 	return;
1191 
1192 failed_undo:
1193 	amdgpu_ring_undo(ring);
1194 failed_unlock:
1195 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1196 failed_kiq_write:
1197 	dev_err(adev->dev, "failed to write reg:%x\n", reg);
1198 }
1199 
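/*
 * Issue an HDP flush packet on the KIQ ring (or via MES when its KIQ is
 * enabled) and poll the fence for completion, mirroring the register
 * read/write helpers above.
 */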
1200 int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
1201 {
1202 	signed long r, cnt = 0;
1203 	unsigned long flags;
1204 	uint32_t seq;
1205 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
1206 	struct amdgpu_ring *ring = &kiq->ring;
1207 
1208 	if (amdgpu_device_skip_hw_access(adev))
1209 		return 0;
1210 
1211 	if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
1212 		return amdgpu_mes_hdp_flush(adev);
1213 
1214 	if (!ring->funcs->emit_hdp_flush) {
1215 		return -EOPNOTSUPP;
1216 	}
1217 
1218 	spin_lock_irqsave(&kiq->ring_lock, flags);
1219 	r = amdgpu_ring_alloc(ring, 32);
1220 	if (r)
1221 		goto failed_unlock;
1222 
1223 	amdgpu_ring_emit_hdp_flush(ring);
1224 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1225 	if (r)
1226 		goto failed_undo;
1227 
1228 	amdgpu_ring_commit(ring);
1229 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1230 
1231 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1232 
1233 	/* Don't wait any longer in the GPU reset case, because doing so may
1234 	 * block the gpu_recover() routine forever: e.g. this virt_kiq_rreg
1235 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
1236 	 * never return if we keep waiting in virt_kiq_rreg, which causes
1237 	 * gpu_recover() to hang there.
1238 	 *
1239 	 * Also don't wait any longer when called from IRQ context.
1240 	 */
1241 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1242 		goto failed_kiq_hdp_flush;
1243 
1244 	might_sleep();
1245 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1246 		if (amdgpu_in_reset(adev))
1247 			goto failed_kiq_hdp_flush;
1248 
1249 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1250 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1251 	}
1252 
1253 	if (cnt > MAX_KIQ_REG_TRY) {
1254 		dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n");
1255 		return -ETIMEDOUT;
1256 	}
1257 
1258 	return 0;
1259 
1260 failed_undo:
1261 	amdgpu_ring_undo(ring);
1262 failed_unlock:
1263 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1264 failed_kiq_hdp_flush:
1265 	dev_err(adev->dev, "failed to flush HDP via KIQ\n");
1266 	return r < 0 ? r : -EIO;
1267 }
1268 
1269 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
1270 {
1271 	if (amdgpu_num_kcq == -1) {
1272 		return 8;
1273 	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1274 		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1275 		return 8;
1276 	}
1277 	return amdgpu_num_kcq;
1278 }
1279 
1280 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
1281 				  uint32_t ucode_id)
1282 {
1283 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1284 	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
1285 	struct amdgpu_firmware_info *info = NULL;
1286 	const struct firmware *ucode_fw;
1287 	unsigned int fw_size;
1288 
1289 	switch (ucode_id) {
1290 	case AMDGPU_UCODE_ID_CP_PFP:
1291 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1292 			adev->gfx.pfp_fw->data;
1293 		adev->gfx.pfp_fw_version =
1294 			le32_to_cpu(cp_hdr->header.ucode_version);
1295 		adev->gfx.pfp_feature_version =
1296 			le32_to_cpu(cp_hdr->ucode_feature_version);
1297 		ucode_fw = adev->gfx.pfp_fw;
1298 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1299 		break;
1300 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
1301 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1302 			adev->gfx.pfp_fw->data;
1303 		adev->gfx.pfp_fw_version =
1304 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1305 		adev->gfx.pfp_feature_version =
1306 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1307 		ucode_fw = adev->gfx.pfp_fw;
1308 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1309 		break;
1310 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
1311 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1312 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1313 			adev->gfx.pfp_fw->data;
1314 		ucode_fw = adev->gfx.pfp_fw;
1315 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1316 		break;
1317 	case AMDGPU_UCODE_ID_CP_ME:
1318 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1319 			adev->gfx.me_fw->data;
1320 		adev->gfx.me_fw_version =
1321 			le32_to_cpu(cp_hdr->header.ucode_version);
1322 		adev->gfx.me_feature_version =
1323 			le32_to_cpu(cp_hdr->ucode_feature_version);
1324 		ucode_fw = adev->gfx.me_fw;
1325 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1326 		break;
1327 	case AMDGPU_UCODE_ID_CP_RS64_ME:
1328 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1329 			adev->gfx.me_fw->data;
1330 		adev->gfx.me_fw_version =
1331 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1332 		adev->gfx.me_feature_version =
1333 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1334 		ucode_fw = adev->gfx.me_fw;
1335 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1336 		break;
1337 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1338 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1339 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1340 			adev->gfx.me_fw->data;
1341 		ucode_fw = adev->gfx.me_fw;
1342 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1343 		break;
1344 	case AMDGPU_UCODE_ID_CP_CE:
1345 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1346 			adev->gfx.ce_fw->data;
1347 		adev->gfx.ce_fw_version =
1348 			le32_to_cpu(cp_hdr->header.ucode_version);
1349 		adev->gfx.ce_feature_version =
1350 			le32_to_cpu(cp_hdr->ucode_feature_version);
1351 		ucode_fw = adev->gfx.ce_fw;
1352 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1353 		break;
1354 	case AMDGPU_UCODE_ID_CP_MEC1:
1355 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1356 			adev->gfx.mec_fw->data;
1357 		adev->gfx.mec_fw_version =
1358 			le32_to_cpu(cp_hdr->header.ucode_version);
1359 		adev->gfx.mec_feature_version =
1360 			le32_to_cpu(cp_hdr->ucode_feature_version);
1361 		ucode_fw = adev->gfx.mec_fw;
1362 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1363 			  le32_to_cpu(cp_hdr->jt_size) * 4;
1364 		break;
1365 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
1366 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1367 			adev->gfx.mec_fw->data;
1368 		ucode_fw = adev->gfx.mec_fw;
1369 		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1370 		break;
1371 	case AMDGPU_UCODE_ID_CP_MEC2:
1372 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1373 			adev->gfx.mec2_fw->data;
1374 		adev->gfx.mec2_fw_version =
1375 			le32_to_cpu(cp_hdr->header.ucode_version);
1376 		adev->gfx.mec2_feature_version =
1377 			le32_to_cpu(cp_hdr->ucode_feature_version);
1378 		ucode_fw = adev->gfx.mec2_fw;
1379 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1380 			  le32_to_cpu(cp_hdr->jt_size) * 4;
1381 		break;
1382 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
1383 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1384 			adev->gfx.mec2_fw->data;
1385 		ucode_fw = adev->gfx.mec2_fw;
1386 		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1387 		break;
1388 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
1389 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1390 			adev->gfx.mec_fw->data;
1391 		adev->gfx.mec_fw_version =
1392 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1393 		adev->gfx.mec_feature_version =
1394 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1395 		ucode_fw = adev->gfx.mec_fw;
1396 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1397 		break;
1398 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1399 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1400 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1401 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1402 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1403 			adev->gfx.mec_fw->data;
1404 		ucode_fw = adev->gfx.mec_fw;
1405 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1406 		break;
1407 	default:
1408 		dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
1409 		return;
1410 	}
1411 
1412 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1413 		info = &adev->firmware.ucode[ucode_id];
1414 		info->ucode_id = ucode_id;
1415 		info->fw = ucode_fw;
1416 		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
1417 	}
1418 }
1419 
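/*
 * The first XCC in each partition is the master; callers such as
 * amdgpu_gfx_enable_kgq()/amdgpu_gfx_disable_kgq() only map or unmap
 * kernel graphics queues on the master XCC.
 */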
1420 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
1421 {
1422 	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
1423 			adev->gfx.num_xcc_per_xcp : 1));
1424 }
1425 
1426 static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
1427 						struct device_attribute *addr,
1428 						char *buf)
1429 {
1430 	struct drm_device *ddev = dev_get_drvdata(dev);
1431 	struct amdgpu_device *adev = drm_to_adev(ddev);
1432 	int mode;
1433 
1434 	/* Only minimal precaution taken to reject requests while in reset. */
1435 	if (amdgpu_in_reset(adev))
1436 		return -EPERM;
1437 
1438 	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1439 					       AMDGPU_XCP_FL_NONE);
1440 
1441 	return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
1442 }
1443 
1444 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
1445 						struct device_attribute *addr,
1446 						const char *buf, size_t count)
1447 {
1448 	struct drm_device *ddev = dev_get_drvdata(dev);
1449 	struct amdgpu_device *adev = drm_to_adev(ddev);
1450 	enum amdgpu_gfx_partition mode;
1451 	int ret = 0, num_xcc;
1452 
1453 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1454 	if (num_xcc % 2 != 0)
1455 		return -EINVAL;
1456 
1457 	if (!strncasecmp("SPX", buf, strlen("SPX"))) {
1458 		mode = AMDGPU_SPX_PARTITION_MODE;
1459 	} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
1460 		/*
1461 		 * DPX mode needs the number of AIDs to be a multiple of 2;
1462 		 * each AID connects 2 XCCs, so num_xcc must be a multiple of 4.
1463 		 */
1464 		if (num_xcc % 4)
1465 			return -EINVAL;
1466 		mode = AMDGPU_DPX_PARTITION_MODE;
1467 	} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
1468 		if (num_xcc != 6)
1469 			return -EINVAL;
1470 		mode = AMDGPU_TPX_PARTITION_MODE;
1471 	} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
1472 		if (num_xcc != 8)
1473 			return -EINVAL;
1474 		mode = AMDGPU_QPX_PARTITION_MODE;
1475 	} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
1476 		mode = AMDGPU_CPX_PARTITION_MODE;
1477 	} else {
1478 		return -EINVAL;
1479 	}
1480 
1481 	/* Don't allow a switch while under reset */
1482 	if (!down_read_trylock(&adev->reset_domain->sem))
1483 		return -EPERM;
1484 
1485 	ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
1486 
1487 	up_read(&adev->reset_domain->sem);
1488 
1489 	if (ret)
1490 		return ret;
1491 
1492 	return count;
1493 }
1494 
1495 static const char *xcp_desc[] = {
1496 	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
1497 	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
1498 	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
1499 	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
1500 	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
1501 };
1502 
1503 static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
1504 						struct device_attribute *addr,
1505 						char *buf)
1506 {
1507 	struct drm_device *ddev = dev_get_drvdata(dev);
1508 	struct amdgpu_device *adev = drm_to_adev(ddev);
1509 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1510 	int size = 0, mode;
1511 	char *sep = "";
1512 
1513 	if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
1514 		return sysfs_emit(buf, "Not supported\n");
1515 
1516 	for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
1517 		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
1518 		sep = ", ";
1519 	}
1520 
1521 	size += sysfs_emit_at(buf, size, "\n");
1522 
1523 	return size;
1524 }
1525 
1526 static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
1527 {
1528 	struct amdgpu_device *adev = ring->adev;
1529 	struct drm_gpu_scheduler *sched = &ring->sched;
1530 	struct drm_sched_entity entity;
1531 	static atomic_t counter;
1532 	struct dma_fence *f;
1533 	struct amdgpu_job *job;
1534 	struct amdgpu_ib *ib;
1535 	void *owner;
1536 	int i, r;
1537 
1538 	/* Initialize the scheduler entity */
1539 	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
1540 				  &sched, 1, NULL);
1541 	if (r) {
1542 		dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
1543 		goto err;
1544 	}
1545 
1546 	/*
1547 	 * Use some unique dummy value as the owner to make sure we execute
1548 	 * the cleaner shader on each submission. The value just needs to change
1549 	 * for each submission and is otherwise meaningless.
1550 	 */
1551 	owner = (void *)(unsigned long)atomic_inc_return(&counter);
1552 
1553 	r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
1554 				     64, 0, &job,
1555 				     AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
1556 	if (r)
1557 		goto err;
1558 
1559 	job->enforce_isolation = true;
1560 	/* always run the cleaner shader */
1561 	job->run_cleaner_shader = true;
1562 
1563 	ib = &job->ibs[0];
1564 	for (i = 0; i <= ring->funcs->align_mask; ++i)
1565 		ib->ptr[i] = ring->funcs->nop;
1566 	ib->length_dw = ring->funcs->align_mask + 1;
1567 
1568 	f = amdgpu_job_submit(job);
1569 
1570 	r = dma_fence_wait(f, false);
1571 	if (r)
1572 		goto err;
1573 
1574 	dma_fence_put(f);
1575 
1576 	/* Clean up the scheduler entity */
1577 	drm_sched_entity_destroy(&entity);
1578 	return 0;
1579 
1580 err:
1581 	return r;
1582 }
1583 
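/*
 * Run the cleaner shader once on every XCC that belongs to the given
 * partition, using the first ready compute ring found on each XCC; returns
 * -ENOENT if not all XCCs of the partition could be covered.
 */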
1584 static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
1585 {
1586 	int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1587 	struct amdgpu_ring *ring;
1588 	int num_xcc_to_clear;
1589 	int i, r, xcc_id;
1590 
1591 	if (adev->gfx.num_xcc_per_xcp)
1592 		num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
1593 	else
1594 		num_xcc_to_clear = 1;
1595 
1596 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1597 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1598 			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1599 			if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
1600 				r = amdgpu_gfx_run_cleaner_shader_job(ring);
1601 				if (r)
1602 					return r;
1603 				num_xcc_to_clear--;
1604 				break;
1605 			}
1606 		}
1607 	}
1608 
1609 	if (num_xcc_to_clear)
1610 		return -ENOENT;
1611 
1612 	return 0;
1613 }
1614 
1615 /**
1616  * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
1617  * @dev: The device structure
1618  * @attr: The device attribute structure
1619  * @buf: The buffer containing the input data
1620  * @count: The size of the input data
1621  *
1622  * Provides the sysfs interface to manually run a cleaner shader, which is
1623  * used to clear the GPU state between different tasks. Writing a value to the
1624  * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
1625  * The value written corresponds to the partition index on multi-partition
1626  * devices. On single-partition devices, the value should be '0'.
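 * For example, 'echo 0 > run_cleaner_shader' runs the cleaner shader on
 * partition 0.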
1627  *
1628  * The cleaner shader clears the Local Data Store (LDS) and General Purpose
1629  * Registers (GPRs) to ensure data isolation between GPU workloads.
1630  *
1631  * Return: The number of bytes written to the sysfs file.
1632  */
1633 static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
1634 						 struct device_attribute *attr,
1635 						 const char *buf,
1636 						 size_t count)
1637 {
1638 	struct drm_device *ddev = dev_get_drvdata(dev);
1639 	struct amdgpu_device *adev = drm_to_adev(ddev);
1640 	int ret;
1641 	long value;
1642 
1643 	if (amdgpu_in_reset(adev))
1644 		return -EPERM;
1645 	if (adev->in_suspend && !adev->in_runpm)
1646 		return -EPERM;
1647 
1648 	if (adev->gfx.disable_kq)
1649 		return -EPERM;
1650 
1651 	ret = kstrtol(buf, 0, &value);
1652 
1653 	if (ret)
1654 		return -EINVAL;
1655 
1656 	if (value < 0)
1657 		return -EINVAL;
1658 
1659 	if (adev->xcp_mgr) {
1660 		if (value >= adev->xcp_mgr->num_xcps)
1661 			return -EINVAL;
1662 	} else {
1663 		if (value > 1)
1664 			return -EINVAL;
1665 	}
1666 
1667 	ret = pm_runtime_get_sync(ddev->dev);
1668 	if (ret < 0) {
1669 		pm_runtime_put_autosuspend(ddev->dev);
1670 		return ret;
1671 	}
1672 
1673 	ret = amdgpu_gfx_run_cleaner_shader(adev, value);
1674 
1675 	pm_runtime_put_autosuspend(ddev->dev);
1676 
1677 	if (ret)
1678 		return ret;
1679 
1680 	return count;
1681 }
1682 
1683 /**
1684  * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
1685  * @dev: The device structure
1686  * @attr: The device attribute structure
1687  * @buf: The buffer to store the output data
1688  *
1689  * Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
1690  * feature for each GPU partition. Reading from the 'enforce_isolation'
1691  * sysfs file returns the isolation settings for all partitions, where '0'
1692  * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy mode,
1693  * and '3' indicates enabled without the cleaner shader.
1694  *
1695  * Return: The number of bytes read from the sysfs file.
1696  */
1697 static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
1698 						struct device_attribute *attr,
1699 						char *buf)
1700 {
1701 	struct drm_device *ddev = dev_get_drvdata(dev);
1702 	struct amdgpu_device *adev = drm_to_adev(ddev);
1703 	int i;
1704 	ssize_t size = 0;
1705 
1706 	if (adev->xcp_mgr) {
1707 		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
1708 			size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
1709 			if (i < (adev->xcp_mgr->num_xcps - 1))
1710 				size += sysfs_emit_at(buf, size, " ");
1711 		}
1712 		buf[size++] = '\n';
1713 	} else {
1714 		size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
1715 	}
1716 
1717 	return size;
1718 }
1719 
1720 /**
1721  * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
1722  * @dev: The device structure
1723  * @attr: The device attribute structure
1724  * @buf: The buffer containing the input data
1725  * @count: The size of the input data
1726  *
1727  * This function allows control over the 'enforce_isolation' feature, which
1728  * serializes access to the graphics engine. Writing to the 'enforce_isolation'
1729  * sysfs file sets the isolation mode for each partition: '0' disables isolation,
1730  * '1' enables isolation with the cleaner shader, '2' enables legacy isolation
1731  * without the cleaner shader, and '3' enables process isolation without
1732  * submitting the cleaner shader. The input should specify the setting for all
1733  * partitions.
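 * For example, on a device with four partitions, writing '1 0 2 3' applies a
 * separate mode to each partition; a single-partition device takes one value.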
1734  *
1735  * Return: The number of bytes written to the sysfs file.
1736  */
1737 static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
1738 						struct device_attribute *attr,
1739 						const char *buf, size_t count)
1740 {
1741 	struct drm_device *ddev = dev_get_drvdata(dev);
1742 	struct amdgpu_device *adev = drm_to_adev(ddev);
1743 	long partition_values[MAX_XCP] = {0};
1744 	int ret, i, num_partitions;
1745 	const char *input_buf = buf;
1746 
1747 	for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
1748 		ret = sscanf(input_buf, "%ld", &partition_values[i]);
1749 		if (ret <= 0)
1750 			break;
1751 
1752 		/* Move the pointer to the next value in the string */
1753 		input_buf = strchr(input_buf, ' ');
1754 		if (input_buf) {
1755 			input_buf++;
1756 		} else {
1757 			i++;
1758 			break;
1759 		}
1760 	}
1761 	num_partitions = i;
1762 
1763 	if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
1764 		return -EINVAL;
1765 
1766 	if (!adev->xcp_mgr && num_partitions != 1)
1767 		return -EINVAL;
1768 
1769 	for (i = 0; i < num_partitions; i++) {
1770 		if (partition_values[i] != 0 &&
1771 		    partition_values[i] != 1 &&
1772 		    partition_values[i] != 2 &&
1773 		    partition_values[i] != 3)
1774 			return -EINVAL;
1775 	}
1776 
1777 	mutex_lock(&adev->enforce_isolation_mutex);
1778 	for (i = 0; i < num_partitions; i++) {
1779 		switch (partition_values[i]) {
1780 		case 0:
1781 		default:
1782 			adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
1783 			break;
1784 		case 1:
1785 			adev->enforce_isolation[i] =
1786 				AMDGPU_ENFORCE_ISOLATION_ENABLE;
1787 			break;
1788 		case 2:
1789 			adev->enforce_isolation[i] =
1790 				AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
1791 			break;
1792 		case 3:
1793 			adev->enforce_isolation[i] =
1794 				AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
1795 			break;
1796 		}
1797 	}
1798 	mutex_unlock(&adev->enforce_isolation_mutex);
1799 
1800 	amdgpu_mes_update_enforce_isolation(adev);
1801 
1802 	return count;
1803 }
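
/*
 * Illustrative example: the write must provide one value per partition in a
 * single write, e.g. on a four-XCP device
 *
 *	$ echo "1 0 0 1" > /sys/class/drm/card0/device/enforce_isolation
 *
 * enables isolation with the cleaner shader on partitions 0 and 3 and
 * disables it on partitions 1 and 2, while a non-partitioned device takes a
 * single value ("0", "1", "2" or "3").  The path is illustrative, as above.
 */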
1804 
1805 static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
1806 						struct device_attribute *attr,
1807 						char *buf)
1808 {
1809 	struct drm_device *ddev = dev_get_drvdata(dev);
1810 	struct amdgpu_device *adev = drm_to_adev(ddev);
1811 
1812 	if (!adev)
1813 		return -ENODEV;
1814 
1815 	return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
1816 }
1817 
1818 static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
1819 						struct device_attribute *attr,
1820 						char *buf)
1821 {
1822 	struct drm_device *ddev = dev_get_drvdata(dev);
1823 	struct amdgpu_device *adev = drm_to_adev(ddev);
1824 
1825 	if (!adev)
1826 		return -ENODEV;
1827 
1828 	return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
1829 }
1830 
1831 static DEVICE_ATTR(run_cleaner_shader, 0200,
1832 		   NULL, amdgpu_gfx_set_run_cleaner_shader);
1833 
1834 static DEVICE_ATTR(enforce_isolation, 0644,
1835 		   amdgpu_gfx_get_enforce_isolation,
1836 		   amdgpu_gfx_set_enforce_isolation);
1837 
1838 static DEVICE_ATTR(current_compute_partition, 0644,
1839 		   amdgpu_gfx_get_current_compute_partition,
1840 		   amdgpu_gfx_set_compute_partition);
1841 
1842 static DEVICE_ATTR(available_compute_partition, 0444,
1843 		   amdgpu_gfx_get_available_compute_partition, NULL);
1844 static DEVICE_ATTR(gfx_reset_mask, 0444,
1845 		   amdgpu_gfx_get_gfx_reset_mask, NULL);
1846 
1847 static DEVICE_ATTR(compute_reset_mask, 0444,
1848 		   amdgpu_gfx_get_compute_reset_mask, NULL);
1849 
1850 static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
1851 {
1852 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1853 	bool xcp_switch_supported;
1854 	int r;
1855 
1856 	if (!xcp_mgr)
1857 		return 0;
1858 
1859 	xcp_switch_supported =
1860 		(xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1861 
1862 	if (!xcp_switch_supported)
1863 		dev_attr_current_compute_partition.attr.mode &=
1864 			~(S_IWUSR | S_IWGRP | S_IWOTH);
1865 
1866 	r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
1867 	if (r)
1868 		return r;
1869 
1870 	if (xcp_switch_supported)
1871 		r = device_create_file(adev->dev,
1872 				       &dev_attr_available_compute_partition);
1873 
1874 	return r;
1875 }
1876 
1877 static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
1878 {
1879 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1880 	bool xcp_switch_supported;
1881 
1882 	if (!xcp_mgr)
1883 		return;
1884 
1885 	xcp_switch_supported =
1886 		(xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1887 	device_remove_file(adev->dev, &dev_attr_current_compute_partition);
1888 
1889 	if (xcp_switch_supported)
1890 		device_remove_file(adev->dev,
1891 				   &dev_attr_available_compute_partition);
1892 }
1893 
1894 static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
1895 {
1896 	int r;
1897 
1898 	r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
1899 	if (r)
1900 		return r;
1901 	if (adev->gfx.enable_cleaner_shader)
1902 		r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
1903 
1904 	return r;
1905 }
1906 
1907 static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
1908 {
1909 	device_remove_file(adev->dev, &dev_attr_enforce_isolation);
1910 	if (adev->gfx.enable_cleaner_shader)
1911 		device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
1912 }
1913 
1914 static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
1915 {
1916 	int r = 0;
1917 
1918 	if (!amdgpu_gpu_recovery)
1919 		return r;
1920 
1921 	if (adev->gfx.num_gfx_rings) {
1922 		r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
1923 		if (r)
1924 			return r;
1925 	}
1926 
1927 	if (adev->gfx.num_compute_rings) {
1928 		r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
1929 		if (r)
1930 			return r;
1931 	}
1932 
1933 	return r;
1934 }
1935 
1936 static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
1937 {
1938 	if (!amdgpu_gpu_recovery)
1939 		return;
1940 
1941 	if (adev->gfx.num_gfx_rings)
1942 		device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
1943 
1944 	if (adev->gfx.num_compute_rings)
1945 		device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
1946 }
1947 
1948 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
1949 {
1950 	int r;
1951 
1952 	r = amdgpu_gfx_sysfs_xcp_init(adev);
1953 	if (r) {
1954 		dev_err(adev->dev, "failed to create xcp sysfs files");
1955 		return r;
1956 	}
1957 
1958 	r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
1959 	if (r)
1960 		dev_err(adev->dev, "failed to create isolation sysfs files");
1961 
1962 	r = amdgpu_gfx_sysfs_reset_mask_init(adev);
1963 	if (r)
1964 		dev_err(adev->dev, "failed to create reset mask sysfs files");
1965 
1966 	return r;
1967 }
1968 
1969 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
1970 {
1971 	if (adev->dev->kobj.sd) {
1972 		amdgpu_gfx_sysfs_xcp_fini(adev);
1973 		amdgpu_gfx_sysfs_isolation_shader_fini(adev);
1974 		amdgpu_gfx_sysfs_reset_mask_fini(adev);
1975 	}
1976 }
1977 
1978 int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
1979 				      unsigned int cleaner_shader_size)
1980 {
1981 	if (!adev->gfx.enable_cleaner_shader)
1982 		return -EOPNOTSUPP;
1983 
1984 	return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
1985 				       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
1986 				       &adev->gfx.cleaner_shader_obj,
1987 				       &adev->gfx.cleaner_shader_gpu_addr,
1988 				       (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1989 }
1990 
1991 void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
1992 {
1993 	if (!adev->gfx.enable_cleaner_shader)
1994 		return;
1995 
1996 	amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
1997 			      &adev->gfx.cleaner_shader_gpu_addr,
1998 			      (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1999 }
2000 
2001 void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
2002 				    unsigned int cleaner_shader_size,
2003 				    const void *cleaner_shader_ptr)
2004 {
2005 	if (!adev->gfx.enable_cleaner_shader)
2006 		return;
2007 
2008 	if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
2009 		memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
2010 			    cleaner_shader_size);
2011 }
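
/*
 * A minimal usage sketch for the three cleaner shader helpers above; the
 * call sites and the shader binary are illustrative and belong to the
 * per-ASIC gfx IP code, not to this file:
 *
 *	// IP block sw_init: reserve the BO that will hold the shader
 *	r = amdgpu_gfx_cleaner_shader_sw_init(adev, shader_size);
 *
 *	// IP block hw_init/resume: copy the shader binary into the BO
 *	amdgpu_gfx_cleaner_shader_init(adev, shader_size, shader_ptr);
 *
 *	// IP block sw_fini: release the BO again
 *	amdgpu_gfx_cleaner_shader_sw_fini(adev);
 */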
2012 
2013 /**
2014  * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
2015  * @adev: amdgpu_device pointer
2016  * @idx: Index of the scheduler to control
2017  * @enable: Whether to enable or disable the KFD scheduler
2018  *
2019  * This function is used to control the KFD (Kernel Fusion Driver) scheduler
2020  * from the KGD. It is part of the cleaner shader feature. This function plays
2021  * a key role in enforcing process isolation on the GPU.
2022  *
2023  * The function uses a reference count mechanism (userq_sch_req_count) to
2024  * keep track of the number of requests to enable the KFD scheduler. When a
2025  * request to enable the KFD scheduler is made, the reference count is
2026  * decremented. When the reference count reaches zero, a delayed work is
2027  * scheduled to enforce isolation after the remaining isolation time period.
2028  *
2029  * When a request to disable the KFD scheduler is made, the function first
2030  * checks if the reference count is zero. If it is, it cancels the delayed work
2031  * for enforcing isolation and checks if the KFD scheduler is active. If the
2032  * KFD scheduler is active, it sends a request to stop the KFD scheduler and
2033  * sets the KFD scheduler state to inactive. Then, it increments the reference
2034  * count.
2035  *
2036  * The function is synchronized using the userq_sch_mutex to ensure that
2037  * the KFD scheduler state and reference count are updated atomically.
2038  *
2039  * Note: If the reference count is already zero when a request to enable the
2040  * KFD scheduler is made, it means there's an imbalance bug somewhere. The
2041  * function triggers a warning in this case.
2042  */
2043 static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
2044 				    bool enable)
2045 {
2046 	mutex_lock(&adev->gfx.userq_sch_mutex);
2047 
2048 	if (enable) {
2049 		/* If the count is already 0, it means there's an imbalance bug somewhere.
2050 		 * Note that the bug may be in a different caller than the one which triggers the
2051 		 * WARN_ON_ONCE.
2052 		 */
2053 		if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
2054 			dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
2055 			goto unlock;
2056 		}
2057 
2058 		adev->gfx.userq_sch_req_count[idx]--;
2059 
2060 		if (adev->gfx.userq_sch_req_count[idx] == 0 &&
2061 		    adev->gfx.userq_sch_inactive[idx]) {
2062 			schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
2063 					      msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
2064 		}
2065 	} else {
2066 		if (adev->gfx.userq_sch_req_count[idx] == 0) {
2067 			cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
2068 			if (!adev->gfx.userq_sch_inactive[idx]) {
2069 				amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
2070 				if (adev->kfd.init_complete)
2071 					amdgpu_amdkfd_stop_sched(adev, idx);
2072 				adev->gfx.userq_sch_inactive[idx] = true;
2073 			}
2074 		}
2075 
2076 		adev->gfx.userq_sch_req_count[idx]++;
2077 	}
2078 
2079 unlock:
2080 	mutex_unlock(&adev->gfx.userq_sch_mutex);
2081 }
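
/*
 * Illustrative reference count sequence for amdgpu_gfx_kfd_sch_ctrl(),
 * assuming a single partition (idx 0) and a count that starts at zero:
 *
 *	amdgpu_gfx_kfd_sch_ctrl(adev, 0, false);  // 0 -> 1, scheduling stopped
 *	amdgpu_gfx_kfd_sch_ctrl(adev, 0, false);  // 1 -> 2, already stopped
 *	amdgpu_gfx_kfd_sch_ctrl(adev, 0, true);   // 2 -> 1
 *	amdgpu_gfx_kfd_sch_ctrl(adev, 0, true);   // 1 -> 0, isolation work queued
 *	amdgpu_gfx_kfd_sch_ctrl(adev, 0, true);   // count already 0: WARN_ON_ONCE
 *
 * Every disable request must eventually be balanced by an enable request.
 */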
2082 
2083 /**
2084  * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
2085  *
2086  * @work: work_struct.
2087  *
2088  * This function is the work handler for enforcing shader isolation on AMD GPUs.
2089  * It counts the outstanding fences on each GFX and compute ring belonging
2090  * to the partition. If any fences remain, it reschedules the
2091  * `enforce_isolation` work after a short (1 ms) delay. If there are none,
2092  * it signals the user queue scheduler and the Kernel Fusion Driver (KFD)
2093  * to resume the runqueue. The function is synchronized using the
2094  */
2095 void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
2096 {
2097 	struct amdgpu_isolation_work *isolation_work =
2098 		container_of(work, struct amdgpu_isolation_work, work.work);
2099 	struct amdgpu_device *adev = isolation_work->adev;
2100 	u32 i, idx, fences = 0;
2101 
2102 	if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
2103 		idx = 0;
2104 	else
2105 		idx = isolation_work->xcp_id;
2106 
2107 	if (idx >= MAX_XCP)
2108 		return;
2109 
2110 	mutex_lock(&adev->enforce_isolation_mutex);
2111 	for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
2112 		if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
2113 			fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
2114 	}
2115 	for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
2116 		if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
2117 			fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
2118 	}
2119 	if (fences) {
2120 		/* we've already had our timeslice, so let's wrap this up */
2121 		schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
2122 				      msecs_to_jiffies(1));
2123 	} else {
2124 		/* Tell KFD to resume the runqueue */
2125 		WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
2126 		WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
2127 
2128 		amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
2129 		if (adev->kfd.init_complete)
2130 			amdgpu_amdkfd_start_sched(adev, idx);
2131 		adev->gfx.userq_sch_inactive[idx] = false;
2132 	}
2133 	mutex_unlock(&adev->enforce_isolation_mutex);
2134 }
2135 
2136 /**
2137  * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
2138  * @adev: amdgpu_device pointer
2139  * @idx: Index of the GPU partition
2140  *
2141  * When kernel submissions come in, the jobs are given a time slice and once
2142  * that time slice is up, if there are KFD user queues active, kernel
2143  * submissions are blocked until KFD has had its time slice. Once the KFD time
2144  * slice is up, KFD user queues are preempted and kernel submissions are
2145  * unblocked and allowed to run again.
2146  */
2147 static void
2148 amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
2149 					  u32 idx)
2150 {
2151 	unsigned long cjiffies;
2152 	bool wait = false;
2153 
2154 	mutex_lock(&adev->enforce_isolation_mutex);
2155 	if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2156 		/* set the initial values if nothing is set */
2157 		if (!adev->gfx.enforce_isolation_jiffies[idx]) {
2158 			adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2159 			adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2160 		}
2161 		/* Make sure KFD gets a chance to run */
2162 		if (amdgpu_amdkfd_compute_active(adev, idx)) {
2163 			cjiffies = jiffies;
2164 			if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
2165 				cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
2166 				if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) {
2167 					/* if our time is up, let KGD work drain before scheduling more */
2168 					wait = true;
2169 					/* reset the timer period */
2170 					adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2171 				} else {
2172 					/* set the timer period to what's left in our time slice */
2173 					adev->gfx.enforce_isolation_time[idx] =
2174 						GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
2175 				}
2176 			} else {
2177 				/* if jiffies wrap around we will just wait a little longer */
2178 				adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2179 			}
2180 		} else {
2181 			/* if there is no KFD work, then set the full slice period */
2182 			adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2183 			adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2184 		}
2185 	}
2186 	mutex_unlock(&adev->enforce_isolation_mutex);
2187 
2188 	if (wait)
2189 		msleep(GFX_SLICE_PERIOD_MS);
2190 }
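
/*
 * Worked example for the slice accounting above (numbers illustrative):
 * with KFD work active and elapsed_ms milliseconds of the KGD slice used,
 *
 *	if elapsed_ms <  GFX_SLICE_PERIOD_MS:
 *		enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS - elapsed_ms
 *	if elapsed_ms >= GFX_SLICE_PERIOD_MS:
 *		the period is reset and this function sleeps for a full
 *		GFX_SLICE_PERIOD_MS so KFD user queues get their turn
 */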
2191 
2192 /**
2193  * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
2194  * @ring: Pointer to the amdgpu_ring structure
2195  *
2196  * Ring begin_use helper implementation for gfx which serializes access to the
2197  * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2198  * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2199  * each get a time slice when both are active.
2200  */
2201 void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
2202 {
2203 	struct amdgpu_device *adev = ring->adev;
2204 	u32 idx;
2205 	bool sched_work = false;
2206 
2207 	if (!adev->gfx.enable_cleaner_shader)
2208 		return;
2209 
2210 	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2211 		idx = 0;
2212 	else
2213 		idx = ring->xcp_id;
2214 
2215 	if (idx >= MAX_XCP)
2216 		return;
2217 
2218 	/* Don't submit more work until KFD has had some time */
2219 	amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
2220 
2221 	mutex_lock(&adev->enforce_isolation_mutex);
2222 	if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2223 		if (adev->kfd.init_complete)
2224 			sched_work = true;
2225 	}
2226 	mutex_unlock(&adev->enforce_isolation_mutex);
2227 
2228 	if (sched_work)
2229 		amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
2230 }
2231 
2232 /**
2233  * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
2234  * @ring: Pointer to the amdgpu_ring structure
2235  *
2236  * Ring end_use helper implementation for gfx which serializes access to the
2237  * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2238  * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2239  * each get a time slice when both are active.
2240  */
2241 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
2242 {
2243 	struct amdgpu_device *adev = ring->adev;
2244 	u32 idx;
2245 	bool sched_work = false;
2246 
2247 	if (!adev->gfx.enable_cleaner_shader)
2248 		return;
2249 
2250 	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2251 		idx = 0;
2252 	else
2253 		idx = ring->xcp_id;
2254 
2255 	if (idx >= MAX_XCP)
2256 		return;
2257 
2258 	mutex_lock(&adev->enforce_isolation_mutex);
2259 	if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2260 		if (adev->kfd.init_complete)
2261 			sched_work = true;
2262 	}
2263 	mutex_unlock(&adev->enforce_isolation_mutex);
2264 
2265 	if (sched_work)
2266 		amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
2267 }
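
/*
 * Both helpers above are meant to be wired up as a gfx/compute ring's
 * begin_use/end_use callbacks so they bracket every kernel submission.
 * A minimal sketch (field names as in struct amdgpu_ring_funcs):
 *
 *	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
 *	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
 *
 * Each begin_use issues a "stop KFD/user queue scheduling" request and each
 * end_use balances it, keeping the amdgpu_gfx_kfd_sch_ctrl() reference
 * count consistent.
 */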
2268 
2269 void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work)
2270 {
2271 	struct amdgpu_device *adev =
2272 		container_of(work, struct amdgpu_device, gfx.idle_work.work);
2273 	enum PP_SMC_POWER_PROFILE profile;
2274 	u32 i, fences = 0;
2275 	int r;
2276 
2277 	if (adev->gfx.num_gfx_rings)
2278 		profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
2279 	else
2280 		profile = PP_SMC_POWER_PROFILE_COMPUTE;
2281 
2282 	for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i)
2283 		fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
2284 	for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i)
2285 		fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
2286 	if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) {
2287 		mutex_lock(&adev->gfx.workload_profile_mutex);
2288 		if (adev->gfx.workload_profile_active) {
2289 			r = amdgpu_dpm_switch_power_profile(adev, profile, false);
2290 			if (r)
2291 				dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
2292 					 profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
2293 					 "fullscreen 3D" : "compute");
2294 			adev->gfx.workload_profile_active = false;
2295 		}
2296 		mutex_unlock(&adev->gfx.workload_profile_mutex);
2297 	} else {
2298 		schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
2299 	}
2300 }
2301 
2302 void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
2303 {
2304 	struct amdgpu_device *adev = ring->adev;
2305 	enum PP_SMC_POWER_PROFILE profile;
2306 	int r;
2307 
2308 	if (amdgpu_dpm_is_overdrive_enabled(adev))
2309 		return;
2310 
2311 	if (adev->gfx.num_gfx_rings)
2312 		profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
2313 	else
2314 		profile = PP_SMC_POWER_PROFILE_COMPUTE;
2315 
2316 	atomic_inc(&adev->gfx.total_submission_cnt);
2317 
2318 	cancel_delayed_work_sync(&adev->gfx.idle_work);
2319 
2320 	/* We can safely return early here because we've cancelled the
2321 	 * delayed work, so there is no one else to set it to false and
2322 	 * we don't care if someone else sets it to true.
2323 	 */
2324 	if (adev->gfx.workload_profile_active)
2325 		return;
2326 
2327 	mutex_lock(&adev->gfx.workload_profile_mutex);
2328 	if (!adev->gfx.workload_profile_active) {
2329 		r = amdgpu_dpm_switch_power_profile(adev, profile, true);
2330 		if (r)
2331 			dev_warn(adev->dev, "(%d) failed to enable %s power profile mode\n", r,
2332 				 profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
2333 				 "fullscreen 3D" : "compute");
2334 		adev->gfx.workload_profile_active = true;
2335 	}
2336 	mutex_unlock(&adev->gfx.workload_profile_mutex);
2337 }
2338 
2339 void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
2340 {
2341 	struct amdgpu_device *adev = ring->adev;
2342 
2343 	if (amdgpu_dpm_is_overdrive_enabled(adev))
2344 		return;
2345 
2346 	atomic_dec(&ring->adev->gfx.total_submission_cnt);
2347 
2348 	schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
2349 }
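
/*
 * Like the isolation helpers, the two profile helpers above are intended
 * for the ring begin_use/end_use paths: begin_use cancels the idle work and
 * switches the workload profile on, end_use re-arms the idle work, which
 * switches the profile back off once no fences or pending submissions
 * remain after GFX_PROFILE_IDLE_TIMEOUT.
 */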
2350 
2351 /**
2352  * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
2353  *
2354  * @buffer: Output buffer that receives the PACKET3 preamble setup.
2355  *
2356  * Return:
2357  * The next free index into @buffer.
2358  */
2359 u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
2360 {
2361 	u32 count = 0;
2362 
2363 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2364 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2365 
2366 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2367 	buffer[count++] = cpu_to_le32(0x80000000);
2368 	buffer[count++] = cpu_to_le32(0x80000000);
2369 
2370 	return count;
2371 }
2372 
2373 /**
2374  * amdgpu_gfx_csb_data_parser - Parse CS data
2375  *
2376  * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
2377  * @buffer: Output buffer that receives the clear-state register packets.
2378  * @count: Index in @buffer at which to start writing.
2379  *
2380  * Return:
2381  * The next free index into @buffer.
2382  */
2383 u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
2384 {
2385 	const struct cs_section_def *sect = NULL;
2386 	const struct cs_extent_def *ext = NULL;
2387 	u32 i;
2388 
2389 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2390 		for (ext = sect->section; ext->extent != NULL; ++ext) {
2391 			if (sect->id == SECT_CONTEXT) {
2392 				buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2393 				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2394 
2395 				for (i = 0; i < ext->reg_count; i++)
2396 					buffer[count++] = cpu_to_le32(ext->extent[i]);
2397 			}
2398 		}
2399 	}
2400 
2401 	return count;
2402 }
2403 
2404 /**
2405  * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
2406  *
2407  * @buffer: Output buffer that receives the PACKET3 preamble end.
2408  * @count: Index in @buffer at which to start writing the preamble end.
2409  */
2410 void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
2411 {
2412 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2413 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
2414 
2415 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
2416 	buffer[count++] = cpu_to_le32(0);
2417 }
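
/*
 * A minimal sketch of how the three CSB helpers above are typically
 * combined by a gfx IP block when filling its clear-state buffer (buffer
 * allocation and sizing are the caller's responsibility):
 *
 *	count = amdgpu_gfx_csb_preamble_start(buffer);
 *	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
 *	amdgpu_gfx_csb_preamble_end(buffer, count);
 */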
2418 
2419 /*
2420  * debugfs interface to enable/disable gfx job submission to specific rings.
2421  */
2422 #if defined(CONFIG_DEBUG_FS)
2423 static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
2424 {
2425 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2426 	u32 i;
2427 	u64 mask = 0;
2428 	struct amdgpu_ring *ring;
2429 
2430 	if (!adev)
2431 		return -ENODEV;
2432 
2433 	mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
2434 	if ((val & mask) == 0)
2435 		return -EINVAL;
2436 
2437 	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2438 		ring = &adev->gfx.gfx_ring[i];
2439 		if (val & (1 << i))
2440 			ring->sched.ready = true;
2441 		else
2442 			ring->sched.ready = false;
2443 	}
2444 	/* make the updated sched.ready flags visible across SMP immediately */
2445 	smp_rmb();
2446 	return 0;
2447 }
2448 
2449 static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
2450 {
2451 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2452 	u32 i;
2453 	u64 mask = 0;
2454 	struct amdgpu_ring *ring;
2455 
2456 	if (!adev)
2457 		return -ENODEV;
2458 	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2459 		ring = &adev->gfx.gfx_ring[i];
2460 		if (ring->sched.ready)
2461 			mask |= 1ULL << i;
2462 	}
2463 
2464 	*val = mask;
2465 	return 0;
2466 }
2467 
2468 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
2469 			 amdgpu_debugfs_gfx_sched_mask_get,
2470 			 amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
2471 
2472 #endif
2473 
2474 void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
2475 {
2476 #if defined(CONFIG_DEBUG_FS)
2477 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2478 	struct dentry *root = minor->debugfs_root;
2479 	char name[32];
2480 
2481 	if (!(adev->gfx.num_gfx_rings > 1))
2482 		return;
2483 	sprintf(name, "amdgpu_gfx_sched_mask");
2484 	debugfs_create_file(name, 0600, root, adev,
2485 			    &amdgpu_debugfs_gfx_sched_mask_fops);
2486 #endif
2487 }
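
/*
 * Illustrative example: with debugfs mounted at /sys/kernel/debug, the mask
 * is typically reachable via the DRM minor directory, e.g.
 *
 *	$ cat /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask
 *	3
 *	$ echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask
 *
 * which leaves only gfx ring 0 accepting jobs.  A value with no bit set for
 * any existing gfx ring is rejected with -EINVAL.  The same pattern applies
 * to amdgpu_compute_sched_mask below.
 */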
2488 
2489 /*
2490  * debugfs interface to enable/disable compute job submission to specific rings.
2491  */
2492 #if defined(CONFIG_DEBUG_FS)
2493 static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
2494 {
2495 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2496 	u32 i;
2497 	u64 mask = 0;
2498 	struct amdgpu_ring *ring;
2499 
2500 	if (!adev)
2501 		return -ENODEV;
2502 
2503 	mask = (1ULL << adev->gfx.num_compute_rings) - 1;
2504 	if ((val & mask) == 0)
2505 		return -EINVAL;
2506 
2507 	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2508 		ring = &adev->gfx.compute_ring[i];
2509 		if (val & (1 << i))
2510 			ring->sched.ready = true;
2511 		else
2512 			ring->sched.ready = false;
2513 	}
2514 
2515 	/* make the updated sched.ready flags visible across SMP immediately */
2516 	smp_rmb();
2517 	return 0;
2518 }
2519 
2520 static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
2521 {
2522 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2523 	u32 i;
2524 	u64 mask = 0;
2525 	struct amdgpu_ring *ring;
2526 
2527 	if (!adev)
2528 		return -ENODEV;
2529 	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2530 		ring = &adev->gfx.compute_ring[i];
2531 		if (ring->sched.ready)
2532 			mask |= 1ULL << i;
2533 	}
2534 
2535 	*val = mask;
2536 	return 0;
2537 }
2538 
2539 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
2540 			 amdgpu_debugfs_compute_sched_mask_get,
2541 			 amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
2542 
2543 #endif
2544 
2545 void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
2546 {
2547 #if defined(CONFIG_DEBUG_FS)
2548 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2549 	struct dentry *root = minor->debugfs_root;
2550 	char name[32];
2551 
2552 	if (adev->gfx.num_compute_rings <= 1)
2553 		return;
2554 	sprintf(name, "amdgpu_compute_sched_mask");
2555 	debugfs_create_file(name, 0600, root, adev,
2556 			    &amdgpu_debugfs_compute_sched_mask_fops);
2557 #endif
2558 }
2559 
2560