xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c (revision 38f7e5450ebfc6f2e046a249a3f629ea7bec8c31)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <linux/pm_runtime.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_gfx.h"
31 #include "amdgpu_rlc.h"
32 #include "amdgpu_ras.h"
33 #include "amdgpu_reset.h"
34 #include "amdgpu_xcp.h"
35 #include "amdgpu_xgmi.h"
36 #include "amdgpu_mes.h"
37 #include "nvd.h"
38 
39 /* delay 0.1 second to enable gfx off feature */
40 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
41 
42 #define GFX_OFF_NO_DELAY 0
43 
44 /*
45  * GPU GFX IP block helpers function.
46  */
47 
48 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
49 				int pipe, int queue)
50 {
51 	int bit = 0;
52 
53 	bit += mec * adev->gfx.mec.num_pipe_per_mec
54 		* adev->gfx.mec.num_queue_per_pipe;
55 	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
56 	bit += queue;
57 
58 	return bit;
59 }
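/*
 * Worked example (illustrative values): with num_pipe_per_mec = 4 and
 * num_queue_per_pipe = 8, MEC 1 / pipe 2 / queue 3 maps to
 * bit = 1 * 4 * 8 + 2 * 8 + 3 = 51.
 */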
60 
61 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
62 				 int *mec, int *pipe, int *queue)
63 {
64 	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
65 	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
66 		% adev->gfx.mec.num_pipe_per_mec;
67 	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
68 	       / adev->gfx.mec.num_pipe_per_mec;
70 }
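/*
 * Continuing the example above: bit 51 decodes back to
 * queue = 51 % 8 = 3, pipe = (51 / 8) % 4 = 2 and mec = (51 / 8) / 4 = 1,
 * so the two helpers are exact inverses of each other.
 */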
71 
72 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
73 				     int xcc_id, int mec, int pipe, int queue)
74 {
75 	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
76 			adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
77 }
78 
79 static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
80 				      int me, int pipe, int queue)
81 {
82 	int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
83 	int bit = 0;
84 
85 	bit += me * adev->gfx.me.num_pipe_per_me
86 		* num_queue_per_pipe;
87 	bit += pipe * num_queue_per_pipe;
88 	bit += queue;
89 
90 	return bit;
91 }
92 
93 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
94 				    int me, int pipe, int queue)
95 {
96 	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
97 			adev->gfx.me.queue_bitmap);
98 }
99 
100 /**
101  * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
102  *
103  * @adev: amdgpu device pointer
104  * @mask: array in which the per-shader array disable masks will be stored
105  * @max_se: number of SEs
106  * @max_sh: number of SHs
107  *
108  * The bitmask of CUs to be disabled in the shader array determined by se and
109  * sh is stored in mask[se * max_sh + sh].
110  */
111 void amdgpu_gfx_parse_disable_cu(struct amdgpu_device *adev, unsigned int *mask,
112 				 unsigned int max_se, unsigned int max_sh)
113 {
114 	unsigned int se, sh, cu;
115 	const char *p;
116 
117 	memset(mask, 0, sizeof(*mask) * max_se * max_sh);
118 
119 	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
120 		return;
121 
122 	p = amdgpu_disable_cu;
123 	for (;;) {
124 		char *next;
125 		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
126 
127 		if (ret < 3) {
128 			drm_err(adev_to_drm(adev), "could not parse disable_cu\n");
129 			return;
130 		}
131 
132 		if (se < max_se && sh < max_sh && cu < 16) {
133 			drm_info(adev_to_drm(adev), "Disabling CU %u.%u.%u\n", se, sh, cu);
134 			mask[se * max_sh + sh] |= 1u << cu;
135 		} else {
136 			drm_err(adev_to_drm(adev), "disable_cu %u.%u.%u is out of range\n",
137 				se, sh, cu);
138 		}
139 
140 		next = strchr(p, ',');
141 		if (!next)
142 			break;
143 		p = next + 1;
144 	}
145 }
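/*
 * Example (illustrative values): booting with
 * amdgpu.disable_cu=2.0.3,2.0.4 parses two "se.sh.cu" triples and sets
 * bits 3 and 4 in mask[2 * max_sh + 0]; valid ranges depend on the ASIC.
 */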
146 
147 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
148 {
149 	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
150 }
151 
152 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
153 {
154 	if (amdgpu_compute_multipipe != -1) {
155 		dev_info(adev->dev, "forcing compute pipe policy %d\n",
156 			 amdgpu_compute_multipipe);
157 		return amdgpu_compute_multipipe == 1;
158 	}
159 
160 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
161 		return true;
162 
163 	/* FIXME: spreading the queues across pipes causes perf regressions
164 	 * on POLARIS11 compute workloads */
165 	if (adev->asic_type == CHIP_POLARIS11)
166 		return false;
167 
168 	return adev->gfx.mec.num_mec > 1;
169 }
170 
171 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
172 						struct amdgpu_ring *ring)
173 {
174 	int queue = ring->queue;
175 	int pipe = ring->pipe;
176 
177 	/* Policy: use pipe1 queue0 as high priority graphics queue if we
178 	 * have more than one gfx pipe.
179 	 */
180 	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
181 	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
182 		int me = ring->me;
183 		int bit;
184 
185 		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
186 		if (ring == &adev->gfx.gfx_ring[bit])
187 			return true;
188 	}
189 
190 	return false;
191 }
192 
193 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
194 					       struct amdgpu_ring *ring)
195 {
196 	/* Policy: use 1st queue as high priority compute queue if we
197 	 * have more than one compute queue.
198 	 */
199 	if (adev->gfx.num_compute_rings > 1 &&
200 	    ring == &adev->gfx.compute_ring[0])
201 		return true;
202 
203 	return false;
204 }
205 
206 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
207 {
208 	int i, j, queue, pipe;
209 	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
210 	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
211 				     adev->gfx.mec.num_queue_per_pipe,
212 				     adev->gfx.num_compute_rings);
213 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
214 
215 	if (multipipe_policy) {
216 		/* policy: spread queues evenly across all pipes on MEC1 only;
217 		 * for multiple XCCs, just use the original policy for simplicity */
218 		for (j = 0; j < num_xcc; j++) {
219 			for (i = 0; i < max_queues_per_mec; i++) {
220 				pipe = i % adev->gfx.mec.num_pipe_per_mec;
221 				queue = (i / adev->gfx.mec.num_pipe_per_mec) %
222 					 adev->gfx.mec.num_queue_per_pipe;
223 
224 				set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
225 					adev->gfx.mec_bitmap[j].queue_bitmap);
226 			}
227 		}
228 	} else {
229 		/* policy: amdgpu owns all queues in the given pipe */
230 		for (j = 0; j < num_xcc; j++) {
231 			for (i = 0; i < max_queues_per_mec; ++i)
232 				set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
233 		}
234 	}
235 
236 	for (j = 0; j < num_xcc; j++) {
237 		dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
238 			bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
239 	}
240 }
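/*
 * Sketch of the multipipe policy above (illustrative values): with
 * num_pipe_per_mec = 4, num_queue_per_pipe = 8 and 8 compute rings,
 * iterations i = 0..7 select (pipe, queue) = (0,0), (1,0), (2,0), (3,0),
 * (0,1), (1,1), (2,1), (3,1), i.e. queues are spread round-robin across
 * the MEC1 pipes rather than filling one pipe first.
 */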
241 
242 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
243 {
244 	int i, queue, pipe;
245 	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
246 	int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
247 	int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
248 
249 	if (multipipe_policy) {
250 		/* policy: amdgpu owns the first queue per pipe at this stage;
251 		 * this will be extended to multiple queues per pipe later */
252 		for (i = 0; i < max_queues_per_me; i++) {
253 			pipe = i % adev->gfx.me.num_pipe_per_me;
254 			queue = (i / adev->gfx.me.num_pipe_per_me) %
255 				num_queue_per_pipe;
256 
257 			set_bit(pipe * num_queue_per_pipe + queue,
258 				adev->gfx.me.queue_bitmap);
259 		}
260 	} else {
261 		for (i = 0; i < max_queues_per_me; ++i)
262 			set_bit(i, adev->gfx.me.queue_bitmap);
263 	}
264 
265 	/* update the number of active graphics rings */
266 	if (adev->gfx.num_gfx_rings)
267 		adev->gfx.num_gfx_rings =
268 			bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
269 }
270 
271 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
272 				  struct amdgpu_ring *ring, int xcc_id)
273 {
274 	int queue_bit;
275 	int mec, pipe, queue;
276 
277 	queue_bit = adev->gfx.mec.num_mec
278 		    * adev->gfx.mec.num_pipe_per_mec
279 		    * adev->gfx.mec.num_queue_per_pipe;
280 
281 	while (--queue_bit >= 0) {
282 		if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
283 			continue;
284 
285 		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
286 
287 		/*
288 		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
289 		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
290 		 * can only be issued on queue 0.
291 		 */
292 		if ((mec == 1 && pipe > 1) || queue != 0)
293 			continue;
294 
295 		ring->me = mec + 1;
296 		ring->pipe = pipe;
297 		ring->queue = queue;
298 
299 		return 0;
300 	}
301 
302 	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
303 	return -EINVAL;
304 }
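/*
 * Example (assuming 2 MECs, 4 pipes per MEC and 8 queues per pipe): the
 * search walks down from bit 63; the first candidate passing the filters
 * above is mec 1 / pipe 1 / queue 0 (bit 40), so the KIQ becomes
 * me 2, pipe 1, queue 0, provided that bit is not already owned by amdgpu.
 */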
305 
306 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
307 {
308 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
309 	struct amdgpu_irq_src *irq = &kiq->irq;
310 	struct amdgpu_ring *ring = &kiq->ring;
311 	int r = 0;
312 
313 	spin_lock_init(&kiq->ring_lock);
314 
315 	ring->adev = NULL;
316 	ring->ring_obj = NULL;
317 	ring->use_doorbell = true;
318 	ring->xcc_id = xcc_id;
319 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
320 	ring->doorbell_index =
321 		(adev->doorbell_index.kiq +
322 		 xcc_id * adev->doorbell_index.xcc_doorbell_range)
323 		<< 1;
324 
325 	r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
326 	if (r)
327 		return r;
328 
329 	ring->eop_gpu_addr = kiq->eop_gpu_addr;
330 	ring->no_scheduler = true;
331 	snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu",
332 		 (unsigned char)xcc_id, (unsigned char)ring->me,
333 		 (unsigned char)ring->pipe, (unsigned char)ring->queue);
334 	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
335 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
336 	if (r)
337 		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
338 
339 	return r;
340 }
341 
342 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
343 {
344 	amdgpu_ring_fini(ring);
345 }
346 
347 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
348 {
349 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
350 
351 	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
352 }
353 
354 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
355 			unsigned int hpd_size, int xcc_id)
356 {
357 	int r;
358 	u32 *hpd;
359 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
360 
361 	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
362 				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
363 				    &kiq->eop_gpu_addr, (void **)&hpd);
364 	if (r) {
365 		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
366 		return r;
367 	}
368 
369 	memset(hpd, 0, hpd_size);
370 
371 	r = amdgpu_bo_reserve(kiq->eop_obj, true);
372 	if (unlikely(r != 0))
373 		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
374 	amdgpu_bo_kunmap(kiq->eop_obj);
375 	amdgpu_bo_unreserve(kiq->eop_obj);
376 
377 	return 0;
378 }
379 
380 /* create MQD for each compute/gfx queue */
381 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
382 			   unsigned int mqd_size, int xcc_id)
383 {
384 	int r, i, j;
385 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
386 	struct amdgpu_ring *ring = &kiq->ring;
387 	u32 domain = AMDGPU_GEM_DOMAIN_GTT;
388 	u32 gfx_mqd_size = max(adev->mqds[AMDGPU_HW_IP_GFX].mqd_size, mqd_size);
389 	u32 compute_mqd_size = max(adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size, mqd_size);
390 
391 #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
392 	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
393 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
394 		domain |= AMDGPU_GEM_DOMAIN_VRAM;
395 #endif
396 
397 	/* create MQD for KIQ */
398 	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
399 		/* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV the VRAM
400 		 * domain is a must, otherwise the hypervisor triggers a SAVE_VF failure after
401 		 * the driver is unloaded (the MQD is deallocated and gart_unbind called).
402 		 * To avoid divergence, use the VRAM domain for the KIQ MQD on both SRIOV and bare-metal.
403 		 */
404 		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
405 					    AMDGPU_GEM_DOMAIN_VRAM |
406 					    AMDGPU_GEM_DOMAIN_GTT,
407 					    &ring->mqd_obj,
408 					    &ring->mqd_gpu_addr,
409 					    &ring->mqd_ptr);
410 		if (r) {
411 			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
412 			return r;
413 		}
414 
415 		/* prepare MQD backup */
416 		kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
417 		if (!kiq->mqd_backup) {
418 			dev_warn(adev->dev,
419 				 "no memory to create MQD backup for ring %s\n", ring->name);
420 			return -ENOMEM;
421 		}
422 	}
423 
424 	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
425 		/* create MQD for each KGQ */
426 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
427 			ring = &adev->gfx.gfx_ring[i];
428 			if (!ring->mqd_obj) {
429 				r = amdgpu_bo_create_kernel(adev, AMDGPU_MQD_SIZE_ALIGN(gfx_mqd_size),
430 								PAGE_SIZE, domain, &ring->mqd_obj,
431 							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
432 				if (r) {
433 					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
434 					return r;
435 				}
436 
437 				ring->mqd_size = gfx_mqd_size;
438 				/* prepare MQD backup */
439 				adev->gfx.me.mqd_backup[i] = kzalloc(gfx_mqd_size, GFP_KERNEL);
440 				if (!adev->gfx.me.mqd_backup[i]) {
441 					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
442 					return -ENOMEM;
443 				}
444 			}
445 		}
446 	}
447 
448 	/* create MQD for each KCQ */
449 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
450 		j = i + xcc_id * adev->gfx.num_compute_rings;
451 		ring = &adev->gfx.compute_ring[j];
452 		if (!ring->mqd_obj) {
453 			r = amdgpu_bo_create_kernel(adev, AMDGPU_MQD_SIZE_ALIGN(compute_mqd_size),
454 							PAGE_SIZE, domain, &ring->mqd_obj,
455 						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
456 			if (r) {
457 				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
458 				return r;
459 			}
460 
461 			ring->mqd_size = compute_mqd_size;
462 			/* prepare MQD backup */
463 			adev->gfx.mec.mqd_backup[j] = kzalloc(compute_mqd_size, GFP_KERNEL);
464 			if (!adev->gfx.mec.mqd_backup[j]) {
465 				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
466 				return -ENOMEM;
467 			}
468 		}
469 	}
470 
471 	return 0;
472 }
473 
474 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
475 {
476 	struct amdgpu_ring *ring = NULL;
477 	int i, j;
478 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
479 
480 	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
481 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
482 			ring = &adev->gfx.gfx_ring[i];
483 			kfree(adev->gfx.me.mqd_backup[i]);
484 			amdgpu_bo_free_kernel(&ring->mqd_obj,
485 					      &ring->mqd_gpu_addr,
486 					      &ring->mqd_ptr);
487 		}
488 	}
489 
490 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
491 		j = i + xcc_id * adev->gfx.num_compute_rings;
492 		ring = &adev->gfx.compute_ring[j];
493 		kfree(adev->gfx.mec.mqd_backup[j]);
494 		amdgpu_bo_free_kernel(&ring->mqd_obj,
495 				      &ring->mqd_gpu_addr,
496 				      &ring->mqd_ptr);
497 	}
498 
499 	ring = &kiq->ring;
500 	kfree(kiq->mqd_backup);
501 	amdgpu_bo_free_kernel(&ring->mqd_obj,
502 			      &ring->mqd_gpu_addr,
503 			      &ring->mqd_ptr);
504 }
505 
506 void amdgpu_gfx_mqd_symmetrically_map_cu_mask(struct amdgpu_device *adev, const uint32_t *cu_mask,
507 					      uint32_t cu_mask_count, uint32_t *se_mask)
508 {
509 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
510 	struct amdgpu_gfx_config *gfx_info = &adev->gfx.config;
511 	uint32_t cu_per_sh[8][4] = {0};
512 	int i, se, sh, cu, cu_bitmap_sh_mul;
513 	int xcc_inst = ffs(adev->gfx.xcc_mask) - 1;
514 	bool wgp_mode_req = amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0);
515 	int cu_inc = wgp_mode_req ? 2 : 1;
516 	uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
517 	int num_xcc, inc, inst = 0;
518 
519 	if (xcc_inst < 0)
520 		xcc_inst = 0;
521 
522 	num_xcc = hweight16(adev->gfx.xcc_mask);
523 	if (!num_xcc)
524 		num_xcc = 1;
525 
526 	inc = cu_inc * num_xcc;
527 
528 	cu_bitmap_sh_mul = 2;
529 
530 	for (se = 0; se < gfx_info->max_shader_engines; se++)
531 		for (sh = 0; sh < gfx_info->max_sh_per_se; sh++)
532 			cu_per_sh[se][sh] = hweight32(
533 				cu_info->bitmap[xcc_inst][se % 4][sh + (se / 4) *
534 				cu_bitmap_sh_mul]);
535 
536 	for (i = 0; i < gfx_info->max_shader_engines; i++)
537 		se_mask[i] = 0;
538 
539 	i = inst;
540 	for (cu = 0; cu < 16; cu += cu_inc) {
541 		for (sh = 0; sh < gfx_info->max_sh_per_se; sh++) {
542 			for (se = 0; se < gfx_info->max_shader_engines; se++) {
543 				if (cu_per_sh[se][sh] > cu) {
544 					if ((i / 32) < cu_mask_count && (cu_mask[i / 32] & (1 << (i % 32))))
545 						se_mask[se] |= en_mask << (cu + sh * 16);
546 					i += inc;
547 					if (i >= cu_mask_count * 32)
548 						return;
549 				}
550 			}
551 		}
552 	}
553 }
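/*
 * Example (assuming GC 10+, so WGP mode): cu_inc = 2 and en_mask = 0x3,
 * i.e. each selected bit in cu_mask enables a WGP (a pair of CUs), and
 * se_mask accumulates 2-bit groups at offset (cu + sh * 16) per shader
 * engine.
 */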
554 
555 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
556 {
557 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
558 	struct amdgpu_ring *kiq_ring = &kiq->ring;
559 	int i, r = 0;
560 	int j;
561 
562 	if (adev->enable_mes) {
563 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
564 			j = i + xcc_id * adev->gfx.num_compute_rings;
565 			amdgpu_mes_unmap_legacy_queue(adev,
566 						   &adev->gfx.compute_ring[j],
567 						   RESET_QUEUES, 0, 0, xcc_id);
568 		}
569 		return 0;
570 	}
571 
572 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
573 		return -EINVAL;
574 
575 	if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
576 		return 0;
577 
578 	spin_lock(&kiq->ring_lock);
579 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
580 					adev->gfx.num_compute_rings)) {
581 		spin_unlock(&kiq->ring_lock);
582 		return -ENOMEM;
583 	}
584 
585 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
586 		j = i + xcc_id * adev->gfx.num_compute_rings;
587 		kiq->pmf->kiq_unmap_queues(kiq_ring,
588 					   &adev->gfx.compute_ring[j],
589 					   RESET_QUEUES, 0, 0);
590 	}
591 	/* Submit unmap queue packet */
592 	amdgpu_ring_commit(kiq_ring);
593 	/*
594 	 * Ring test will do a basic scratch register change check. Just run
595 	 * this to ensure that the unmap queue packets submitted above were
596 	 * processed successfully before returning.
597 	 */
598 	r = amdgpu_ring_test_helper(kiq_ring);
599 
600 	spin_unlock(&kiq->ring_lock);
601 
602 	return r;
603 }
604 
605 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
606 {
607 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
608 	struct amdgpu_ring *kiq_ring = &kiq->ring;
609 	int i, r = 0;
610 	int j;
611 
612 	if (adev->enable_mes) {
613 		if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
614 			for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
615 				j = i + xcc_id * adev->gfx.num_gfx_rings;
616 				amdgpu_mes_unmap_legacy_queue(adev,
617 						      &adev->gfx.gfx_ring[j],
618 						      PREEMPT_QUEUES, 0, 0, xcc_id);
619 			}
620 		}
621 		return 0;
622 	}
623 
624 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
625 		return -EINVAL;
626 
627 	if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
628 		return 0;
629 
630 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
631 		spin_lock(&kiq->ring_lock);
632 		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
633 						adev->gfx.num_gfx_rings)) {
634 			spin_unlock(&kiq->ring_lock);
635 			return -ENOMEM;
636 		}
637 
638 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
639 			j = i + xcc_id * adev->gfx.num_gfx_rings;
640 			kiq->pmf->kiq_unmap_queues(kiq_ring,
641 						   &adev->gfx.gfx_ring[j],
642 						   PREEMPT_QUEUES, 0, 0);
643 		}
644 		/* Submit unmap queue packet */
645 		amdgpu_ring_commit(kiq_ring);
646 
647 		/*
648 		 * Ring test will do a basic scratch register change check.
649 		 * Just run this to ensure that the unmap queue packets submitted
650 		 * above were processed successfully before returning.
651 		 */
652 		r = amdgpu_ring_test_helper(kiq_ring);
653 		spin_unlock(&kiq->ring_lock);
654 	}
655 
656 	return r;
657 }
658 
659 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
660 					int queue_bit)
661 {
662 	int mec, pipe, queue;
663 	int set_resource_bit = 0;
664 
665 	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
666 
667 	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
668 
669 	return set_resource_bit;
670 }
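/*
 * Example: queue_bit 51 (mec 1 / pipe 2 / queue 3 with the illustrative
 * 4x8 topology above) maps to set_resource bit 1 * 4 * 8 + 2 * 8 + 3 = 51;
 * the layout here is fixed at 4 pipes x 8 queues per MEC regardless of
 * the ASIC's actual topology.
 */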
671 
672 static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
673 {
674 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
675 	struct amdgpu_ring *kiq_ring = &kiq->ring;
676 	uint64_t queue_mask = ~0ULL;
677 	int r, i, j;
678 
679 	amdgpu_device_flush_hdp(adev, NULL);
680 
681 	if (!adev->enable_uni_mes) {
682 		spin_lock(&kiq->ring_lock);
683 		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
684 		if (r) {
685 			dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
686 			spin_unlock(&kiq->ring_lock);
687 			return r;
688 		}
689 
690 		kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
691 		r = amdgpu_ring_test_helper(kiq_ring);
692 		spin_unlock(&kiq->ring_lock);
693 		if (r)
694 			dev_err(adev->dev, "KIQ failed to set resources\n");
695 	}
696 
697 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
698 		j = i + xcc_id * adev->gfx.num_compute_rings;
699 		r = amdgpu_mes_map_legacy_queue(adev,
700 						&adev->gfx.compute_ring[j],
701 						xcc_id);
702 		if (r) {
703 			dev_err(adev->dev, "failed to map compute queue\n");
704 			return r;
705 		}
706 	}
707 
708 	return 0;
709 }
710 
711 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
712 {
713 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
714 	struct amdgpu_ring *kiq_ring = &kiq->ring;
715 	uint64_t queue_mask = 0;
716 	int r, i, j;
717 
718 	if (adev->mes.enable_legacy_queue_map)
719 		return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
720 
721 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
722 		return -EINVAL;
723 
724 	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
725 		if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
726 			continue;
727 
728 		/* This situation may be hit in the future if a new HW
729 		 * generation exposes more than 64 queues. If so, the
730 		 * definition of queue_mask needs updating. */
731 		if (WARN_ON(i >= (sizeof(queue_mask) * 8))) {
732 			dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
733 			break;
734 		}
735 
736 		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
737 	}
738 
739 	amdgpu_device_flush_hdp(adev, NULL);
740 
741 	dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
742 		 kiq_ring->pipe, kiq_ring->queue);
743 
744 	spin_lock(&kiq->ring_lock);
745 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
746 					adev->gfx.num_compute_rings +
747 					kiq->pmf->set_resources_size);
748 	if (r) {
749 		dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
750 		spin_unlock(&kiq->ring_lock);
751 		return r;
752 	}
753 
754 	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
755 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
756 		j = i + xcc_id * adev->gfx.num_compute_rings;
757 		kiq->pmf->kiq_map_queues(kiq_ring,
758 					 &adev->gfx.compute_ring[j]);
759 	}
760 	/* Submit map queue packet */
761 	amdgpu_ring_commit(kiq_ring);
762 	/*
763 	 * Ring test will do a basic scratch register change check. Just run
764 	 * this to ensure that the map queue packets submitted above were
765 	 * processed successfully before returning.
766 	 */
767 	r = amdgpu_ring_test_helper(kiq_ring);
768 	spin_unlock(&kiq->ring_lock);
769 	if (r)
770 		dev_err(adev->dev, "KCQ enable failed\n");
771 
772 	return r;
773 }
774 
775 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
776 {
777 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
778 	struct amdgpu_ring *kiq_ring = &kiq->ring;
779 	int r, i, j;
780 
781 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
782 		return -EINVAL;
783 
784 	amdgpu_device_flush_hdp(adev, NULL);
785 
786 	if (adev->mes.enable_legacy_queue_map) {
787 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
788 			j = i + xcc_id * adev->gfx.num_gfx_rings;
789 			r = amdgpu_mes_map_legacy_queue(adev,
790 							&adev->gfx.gfx_ring[j],
791 							xcc_id);
792 			if (r) {
793 				dev_err(adev->dev, "failed to map gfx queue\n");
794 				return r;
795 			}
796 		}
797 
798 		return 0;
799 	}
800 
801 	spin_lock(&kiq->ring_lock);
802 	/* No need to map kgq on the slave */
803 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
804 		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
805 						adev->gfx.num_gfx_rings);
806 		if (r) {
807 			dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
808 			spin_unlock(&kiq->ring_lock);
809 			return r;
810 		}
811 
812 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
813 			j = i + xcc_id * adev->gfx.num_gfx_rings;
814 			kiq->pmf->kiq_map_queues(kiq_ring,
815 						 &adev->gfx.gfx_ring[j]);
816 		}
817 	}
818 	/* Submit map queue packet */
819 	amdgpu_ring_commit(kiq_ring);
820 	/*
821 	 * Ring test will do a basic scratch register change check. Just run
822 	 * this to ensure that the map queue packets submitted above were
823 	 * processed successfully before returning.
824 	 */
825 	r = amdgpu_ring_test_helper(kiq_ring);
826 	spin_unlock(&kiq->ring_lock);
827 	if (r)
828 		dev_err(adev->dev, "KGQ enable failed\n");
829 
830 	return r;
831 }
832 
833 static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable,
834 				   bool no_delay)
835 {
836 	unsigned long delay = GFX_OFF_DELAY_ENABLE;
837 
838 	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
839 		return;
840 
841 	mutex_lock(&adev->gfx.gfx_off_mutex);
842 
843 	if (enable) {
844 		/* If the count is already 0, it means there's an imbalance bug somewhere.
845 		 * Note that the bug may be in a different caller than the one which triggers the
846 		 * WARN_ON_ONCE.
847 		 */
848 		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
849 			goto unlock;
850 
851 		adev->gfx.gfx_off_req_count--;
852 
853 		if (adev->gfx.gfx_off_req_count == 0 &&
854 		    !adev->gfx.gfx_off_state) {
855 			/* If going to s2idle, no need to wait */
856 			if (no_delay) {
857 				if (!amdgpu_dpm_set_powergating_by_smu(adev,
858 						AMD_IP_BLOCK_TYPE_GFX, true, 0))
859 					adev->gfx.gfx_off_state = true;
860 			} else {
861 				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
862 					      delay);
863 			}
864 		}
865 	} else {
866 		if (adev->gfx.gfx_off_req_count == 0) {
867 			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
868 
869 			if (adev->gfx.gfx_off_state &&
870 			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
871 				adev->gfx.gfx_off_state = false;
872 
873 				if (adev->gfx.funcs->init_spm_golden) {
874 					dev_dbg(adev->dev,
875 						"GFXOFF is disabled, re-init SPM golden settings\n");
876 					amdgpu_gfx_init_spm_golden(adev);
877 				}
878 			}
879 		}
880 
881 		adev->gfx.gfx_off_req_count++;
882 	}
883 
884 unlock:
885 	mutex_unlock(&adev->gfx.gfx_off_mutex);
886 }
887 
888 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
889  *
890  * @adev: amdgpu_device pointer
891  * @enable: true: enable gfx off feature, false: disable gfx off feature
892  *
893  * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
894  * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
895  * 3. Other clients can cancel their requests to disable the gfx off feature.
896  * 4. Clients should not send an enable request without a matching prior disable request.
897  *
898  * The gfx off allow will be delayed by GFX_OFF_DELAY_ENABLE ms.
899  */
900 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
901 {
902 	/* If going to s2idle, no need to wait */
903 	bool no_delay = adev->in_s0ix;
904 
905 	amdgpu_gfx_do_off_ctrl(adev, enable, no_delay);
906 }
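/*
 * Typical usage (a sketch): callers bracket direct GFX hardware access
 * with a disable/enable pair so the request count stays balanced:
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	// keep GFX powered on
 *	...					// access GFX registers
 *	amdgpu_gfx_off_ctrl(adev, true);	// drop the disable request
 */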
907 
908 /* amdgpu_gfx_off_ctrl_immediate - Handle gfx off feature enable/disable
909  *
910  * @adev: amdgpu_device pointer
911  * @enable: true: enable gfx off feature, false: disable gfx off feature
912  *
913  * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
914  * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
915  * 3. Other clients can cancel their requests to disable the gfx off feature.
916  * 4. Clients should not send an enable request without a matching prior disable request.
917  *
918  * The gfx off allow will be issued immediately.
919  */
920 void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable)
921 {
922 	amdgpu_gfx_do_off_ctrl(adev, enable, true);
923 }
924 
925 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
926 {
927 	int r = 0;
928 
929 	mutex_lock(&adev->gfx.gfx_off_mutex);
930 
931 	r = amdgpu_dpm_set_residency_gfxoff(adev, value);
932 
933 	mutex_unlock(&adev->gfx.gfx_off_mutex);
934 
935 	return r;
936 }
937 
938 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
939 {
940 	int r = 0;
941 
942 	mutex_lock(&adev->gfx.gfx_off_mutex);
943 
944 	r = amdgpu_dpm_get_residency_gfxoff(adev, value);
945 
946 	mutex_unlock(&adev->gfx.gfx_off_mutex);
947 
948 	return r;
949 }
950 
951 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
952 {
953 	int r = 0;
954 
955 	mutex_lock(&adev->gfx.gfx_off_mutex);
956 
957 	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
958 
959 	mutex_unlock(&adev->gfx.gfx_off_mutex);
960 
961 	return r;
962 }
963 
964 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
965 {
967 	int r = 0;
968 
969 	mutex_lock(&adev->gfx.gfx_off_mutex);
970 
971 	r = amdgpu_dpm_get_status_gfxoff(adev, value);
972 
973 	mutex_unlock(&adev->gfx.gfx_off_mutex);
974 
975 	return r;
976 }
977 
978 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
979 {
980 	int r;
981 
982 	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
983 		if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
984 			r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
985 			if (r)
986 				return r;
987 		}
988 
989 		r = amdgpu_ras_block_late_init(adev, ras_block);
990 		if (r)
991 			return r;
992 
993 		if (amdgpu_sriov_vf(adev))
994 			return r;
995 
996 		if (adev->gfx.cp_ecc_error_irq.funcs) {
997 			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
998 			if (r)
999 				goto late_fini;
1000 		}
1001 	} else {
1002 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
1003 	}
1004 
1005 	return 0;
1006 late_fini:
1007 	amdgpu_ras_block_late_fini(adev, ras_block);
1008 	return r;
1009 }
1010 
1011 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
1012 {
1013 	int err = 0;
1014 	struct amdgpu_gfx_ras *ras = NULL;
1015 
1016 	/* If adev->gfx.ras is NULL, gfx does not support the
1017 	 * RAS function, so do nothing here.
1018 	 */
1019 	if (!adev->gfx.ras)
1020 		return 0;
1021 
1022 	ras = adev->gfx.ras;
1023 
1024 	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
1025 	if (err) {
1026 		dev_err(adev->dev, "Failed to register gfx ras block!\n");
1027 		return err;
1028 	}
1029 
1030 	strcpy(ras->ras_block.ras_comm.name, "gfx");
1031 	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
1032 	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
1033 	adev->gfx.ras_if = &ras->ras_block.ras_comm;
1034 
1035 	/* If no special ras_late_init function is defined, use the gfx default ras_late_init */
1036 	if (!ras->ras_block.ras_late_init)
1037 		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
1038 
1039 	/* If no special ras_cb function is defined, use the default ras_cb */
1040 	if (!ras->ras_block.ras_cb)
1041 		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
1042 
1043 	return 0;
1044 }
1045 
1046 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
1047 						struct amdgpu_iv_entry *entry)
1048 {
1049 	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
1050 		return adev->gfx.ras->poison_consumption_handler(adev, entry);
1051 
1052 	return 0;
1053 }
1054 
1055 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
1056 		void *err_data,
1057 		struct amdgpu_iv_entry *entry)
1058 {
1059 	/* TODO: a UE will trigger an interrupt.
1060 	 *
1061 	 * When "Full RAS" is enabled, the per-IP interrupt sources should
1062 	 * be disabled and the driver should only look for the aggregated
1063 	 * interrupt via sync flood.
1064 	 */
1065 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
1066 		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
1067 		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
1068 		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
1069 			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1070 		amdgpu_ras_reset_gpu(adev);
1071 	}
1072 	return AMDGPU_RAS_SUCCESS;
1073 }
1074 
1075 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
1076 				  struct amdgpu_irq_src *source,
1077 				  struct amdgpu_iv_entry *entry)
1078 {
1079 	struct ras_common_if *ras_if = adev->gfx.ras_if;
1080 	struct ras_dispatch_if ih_data = {
1081 		.entry = entry,
1082 	};
1083 
1084 	if (!ras_if)
1085 		return 0;
1086 
1087 	ih_data.head = *ras_if;
1088 
1089 	dev_err(adev->dev, "CP ECC ERROR IRQ\n");
1090 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1091 	return 0;
1092 }
1093 
1094 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
1095 		void *ras_error_status,
1096 		void (*func)(struct amdgpu_device *adev, void *ras_error_status,
1097 				int xcc_id))
1098 {
1099 	int i;
1100 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
1101 	uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
1102 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
1103 
1104 	if (err_data) {
1105 		err_data->ue_count = 0;
1106 		err_data->ce_count = 0;
1107 	}
1108 
1109 	for_each_inst(i, xcc_mask)
1110 		func(adev, ras_error_status, i);
1111 }
1112 
1113 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
1114 {
1115 	signed long r, cnt = 0;
1116 	unsigned long flags;
1117 	uint32_t seq, reg_val_offs = 0, value = 0;
1118 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1119 	struct amdgpu_ring *ring = &kiq->ring;
1120 
1121 	if (amdgpu_device_skip_hw_access(adev))
1122 		return 0;
1123 
1124 	if (adev->mes.ring[0].sched.ready)
1125 		return amdgpu_mes_rreg(adev, reg, xcc_id);
1126 
1127 	BUG_ON(!ring->funcs->emit_rreg);
1128 
1129 	spin_lock_irqsave(&kiq->ring_lock, flags);
1130 	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
1131 		pr_err("critical bug! too many kiq readers\n");
1132 		goto failed_unlock;
1133 	}
1134 	r = amdgpu_ring_alloc(ring, 32);
1135 	if (r)
1136 		goto failed_unlock;
1137 
1138 	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
1139 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1140 	if (r)
1141 		goto failed_undo;
1142 
1143 	amdgpu_ring_commit(ring);
1144 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1145 
1146 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1147 
1148 	/* Don't keep waiting in the gpu reset case, because doing so may
1149 	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1150 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
1151 	 * never return if we keep waiting here, which makes
1152 	 * gpu_recover() hang.
1153 	 *
1154 	 * Also don't keep waiting in IRQ context.
1155 	 */
1156 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1157 		goto failed_kiq_read;
1158 
1159 	might_sleep();
1160 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1161 		if (amdgpu_in_reset(adev))
1162 			goto failed_kiq_read;
1163 
1164 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1165 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1166 	}
1167 
1168 	if (cnt > MAX_KIQ_REG_TRY)
1169 		goto failed_kiq_read;
1170 
1171 	mb();
1172 	value = adev->wb.wb[reg_val_offs];
1173 	amdgpu_device_wb_free(adev, reg_val_offs);
1174 	return value;
1175 
1176 failed_undo:
1177 	amdgpu_ring_undo(ring);
1178 failed_unlock:
1179 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1180 failed_kiq_read:
1181 	if (reg_val_offs)
1182 		amdgpu_device_wb_free(adev, reg_val_offs);
1183 	dev_err(adev->dev, "failed to read reg:%x\n", reg);
1184 	return ~0;
1185 }
1186 
1187 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
1188 {
1189 	signed long r, cnt = 0;
1190 	unsigned long flags;
1191 	uint32_t seq;
1192 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1193 	struct amdgpu_ring *ring = &kiq->ring;
1194 
1195 	BUG_ON(!ring->funcs->emit_wreg);
1196 
1197 	if (amdgpu_device_skip_hw_access(adev))
1198 		return;
1199 
1200 	if (adev->mes.ring[0].sched.ready) {
1201 		amdgpu_mes_wreg(adev, reg, v, xcc_id);
1202 		return;
1203 	}
1204 
1205 	spin_lock_irqsave(&kiq->ring_lock, flags);
1206 	r = amdgpu_ring_alloc(ring, 32);
1207 	if (r)
1208 		goto failed_unlock;
1209 
1210 	amdgpu_ring_emit_wreg(ring, reg, v);
1211 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1212 	if (r)
1213 		goto failed_undo;
1214 
1215 	amdgpu_ring_commit(ring);
1216 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1217 
1218 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1219 
1220 	/* Don't keep waiting in the gpu reset case, because doing so may
1221 	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1222 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
1223 	 * never return if we keep waiting here, which makes
1224 	 * gpu_recover() hang.
1225 	 *
1226 	 * Also don't keep waiting in IRQ context.
1227 	 */
1228 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1229 		goto failed_kiq_write;
1230 
1231 	might_sleep();
1232 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1233 		if (amdgpu_in_reset(adev))
1234 			goto failed_kiq_write;
1235 
1236 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1237 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1238 	}
1239 
1240 	if (cnt > MAX_KIQ_REG_TRY)
1241 		goto failed_kiq_write;
1242 
1243 	return;
1244 
1245 failed_undo:
1246 	amdgpu_ring_undo(ring);
1247 failed_unlock:
1248 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1249 failed_kiq_write:
1250 	dev_err(adev->dev, "failed to write reg:%x\n", reg);
1251 }
1252 
1253 void amdgpu_gfx_get_hdp_flush_mask(struct amdgpu_ring *ring,
1254 		uint32_t *hdp_flush_mask, uint32_t *reg_mem_engine)
1255 {
1257 	if (!ring || !hdp_flush_mask || !reg_mem_engine) {
1258 		DRM_INFO("%s:invalid params\n", __func__);
1259 		return;
1260 	}
1261 
1262 	const struct nbio_hdp_flush_reg *nbio_hf_reg = ring->adev->nbio.hdp_flush_reg;
1263 
1264 	switch (ring->funcs->type) {
1265 	case AMDGPU_RING_TYPE_GFX:
1266 		*hdp_flush_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
1267 		*reg_mem_engine = 1; /* pfp */
1268 		break;
1269 	case AMDGPU_RING_TYPE_COMPUTE:
1270 		*hdp_flush_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
1271 		*reg_mem_engine = 0;
1272 		break;
1273 	case AMDGPU_RING_TYPE_MES:
1274 		*hdp_flush_mask = nbio_hf_reg->ref_and_mask_cp8;
1275 		*reg_mem_engine = 0;
1276 		break;
1277 	case AMDGPU_RING_TYPE_KIQ:
1278 		*hdp_flush_mask = nbio_hf_reg->ref_and_mask_cp9;
1279 		*reg_mem_engine = 0;
1280 		break;
1281 	default:
1282 		DRM_ERROR("%s:unsupported ring type %d\n", __func__, ring->funcs->type);
1283 		return;
1284 	}
1285 }
1286 
1287 int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
1288 {
1289 	signed long r, cnt = 0;
1290 	unsigned long flags;
1291 	uint32_t seq;
1292 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
1293 	struct amdgpu_ring *ring = &kiq->ring;
1294 
1295 	if (amdgpu_device_skip_hw_access(adev))
1296 		return 0;
1297 
1298 	if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
1299 		return amdgpu_mes_hdp_flush(adev);
1300 
1301 	if (!ring->funcs->emit_hdp_flush)
1302 		return -EOPNOTSUPP;
1304 
1305 	spin_lock_irqsave(&kiq->ring_lock, flags);
1306 	r = amdgpu_ring_alloc(ring, 32);
1307 	if (r)
1308 		goto failed_unlock;
1309 
1310 	amdgpu_ring_emit_hdp_flush(ring);
1311 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1312 	if (r)
1313 		goto failed_undo;
1314 
1315 	amdgpu_ring_commit(ring);
1316 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1317 
1318 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1319 
1320 	/* Don't keep waiting in the gpu reset case, because doing so may
1321 	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1322 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
1323 	 * never return if we keep waiting here, which makes
1324 	 * gpu_recover() hang.
1325 	 *
1326 	 * Also don't keep waiting in IRQ context.
1327 	 */
1328 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1329 		goto failed_kiq_hdp_flush;
1330 
1331 	might_sleep();
1332 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1333 		if (amdgpu_in_reset(adev))
1334 			goto failed_kiq_hdp_flush;
1335 
1336 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1337 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1338 	}
1339 
1340 	if (cnt > MAX_KIQ_REG_TRY) {
1341 		dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n");
1342 		return -ETIMEDOUT;
1343 	}
1344 
1345 	return 0;
1346 
1347 failed_undo:
1348 	amdgpu_ring_undo(ring);
1349 failed_unlock:
1350 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1351 failed_kiq_hdp_flush:
1352 	if (!amdgpu_in_reset(adev))
1353 		dev_err(adev->dev, "failed to flush HDP via KIQ\n");
1354 	return r < 0 ? r : -EIO;
1355 }
1356 
1357 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
1358 {
1359 	if (amdgpu_num_kcq == -1) {
1360 		return 8;
1361 	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1362 		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1363 		return 8;
1364 	}
1365 	return amdgpu_num_kcq;
1366 }
1367 
1368 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
1369 				  uint32_t ucode_id)
1370 {
1371 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1372 	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
1373 	struct amdgpu_firmware_info *info = NULL;
1374 	const struct firmware *ucode_fw;
1375 	unsigned int fw_size;
1376 
1377 	switch (ucode_id) {
1378 	case AMDGPU_UCODE_ID_CP_PFP:
1379 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1380 			adev->gfx.pfp_fw->data;
1381 		adev->gfx.pfp_fw_version =
1382 			le32_to_cpu(cp_hdr->header.ucode_version);
1383 		adev->gfx.pfp_feature_version =
1384 			le32_to_cpu(cp_hdr->ucode_feature_version);
1385 		ucode_fw = adev->gfx.pfp_fw;
1386 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1387 		break;
1388 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
1389 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1390 			adev->gfx.pfp_fw->data;
1391 		adev->gfx.pfp_fw_version =
1392 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1393 		adev->gfx.pfp_feature_version =
1394 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1395 		ucode_fw = adev->gfx.pfp_fw;
1396 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1397 		break;
1398 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
1399 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1400 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1401 			adev->gfx.pfp_fw->data;
1402 		ucode_fw = adev->gfx.pfp_fw;
1403 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1404 		break;
1405 	case AMDGPU_UCODE_ID_CP_ME:
1406 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1407 			adev->gfx.me_fw->data;
1408 		adev->gfx.me_fw_version =
1409 			le32_to_cpu(cp_hdr->header.ucode_version);
1410 		adev->gfx.me_feature_version =
1411 			le32_to_cpu(cp_hdr->ucode_feature_version);
1412 		ucode_fw = adev->gfx.me_fw;
1413 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1414 		break;
1415 	case AMDGPU_UCODE_ID_CP_RS64_ME:
1416 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1417 			adev->gfx.me_fw->data;
1418 		adev->gfx.me_fw_version =
1419 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1420 		adev->gfx.me_feature_version =
1421 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1422 		ucode_fw = adev->gfx.me_fw;
1423 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1424 		break;
1425 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1426 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1427 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1428 			adev->gfx.me_fw->data;
1429 		ucode_fw = adev->gfx.me_fw;
1430 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1431 		break;
1432 	case AMDGPU_UCODE_ID_CP_CE:
1433 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1434 			adev->gfx.ce_fw->data;
1435 		adev->gfx.ce_fw_version =
1436 			le32_to_cpu(cp_hdr->header.ucode_version);
1437 		adev->gfx.ce_feature_version =
1438 			le32_to_cpu(cp_hdr->ucode_feature_version);
1439 		ucode_fw = adev->gfx.ce_fw;
1440 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1441 		break;
1442 	case AMDGPU_UCODE_ID_CP_MEC1:
1443 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1444 			adev->gfx.mec_fw->data;
1445 		adev->gfx.mec_fw_version =
1446 			le32_to_cpu(cp_hdr->header.ucode_version);
1447 		adev->gfx.mec_feature_version =
1448 			le32_to_cpu(cp_hdr->ucode_feature_version);
1449 		ucode_fw = adev->gfx.mec_fw;
1450 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1451 			  le32_to_cpu(cp_hdr->jt_size) * 4;
1452 		break;
1453 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
1454 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1455 			adev->gfx.mec_fw->data;
1456 		ucode_fw = adev->gfx.mec_fw;
1457 		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1458 		break;
1459 	case AMDGPU_UCODE_ID_CP_MEC2:
1460 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1461 			adev->gfx.mec2_fw->data;
1462 		adev->gfx.mec2_fw_version =
1463 			le32_to_cpu(cp_hdr->header.ucode_version);
1464 		adev->gfx.mec2_feature_version =
1465 			le32_to_cpu(cp_hdr->ucode_feature_version);
1466 		ucode_fw = adev->gfx.mec2_fw;
1467 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1468 			  le32_to_cpu(cp_hdr->jt_size) * 4;
1469 		break;
1470 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
1471 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1472 			adev->gfx.mec2_fw->data;
1473 		ucode_fw = adev->gfx.mec2_fw;
1474 		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1475 		break;
1476 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
1477 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1478 			adev->gfx.mec_fw->data;
1479 		adev->gfx.mec_fw_version =
1480 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1481 		adev->gfx.mec_feature_version =
1482 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1483 		ucode_fw = adev->gfx.mec_fw;
1484 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1485 		break;
1486 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1487 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1488 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1489 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1490 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1491 			adev->gfx.mec_fw->data;
1492 		ucode_fw = adev->gfx.mec_fw;
1493 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1494 		break;
1495 	default:
1496 		dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
1497 		return;
1498 	}
1499 
1500 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1501 		info = &adev->firmware.ucode[ucode_id];
1502 		info->ucode_id = ucode_id;
1503 		info->fw = ucode_fw;
1504 		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
1505 	}
1506 }
1507 
1508 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
1509 {
1510 	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
1511 			adev->gfx.num_xcc_per_xcp : 1));
1512 }
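/*
 * Example: with num_xcc_per_xcp = 2, XCCs 0, 2, 4, ... are the masters
 * of their partitions and odd-numbered XCCs are slaves.
 */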
1513 
1514 static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
1515 						struct device_attribute *addr,
1516 						char *buf)
1517 {
1518 	struct drm_device *ddev = dev_get_drvdata(dev);
1519 	struct amdgpu_device *adev = drm_to_adev(ddev);
1520 	int mode;
1521 
1522 	/* Only minimal precaution is taken to reject requests while in reset. */
1523 	if (amdgpu_in_reset(adev))
1524 		return -EPERM;
1525 
1526 	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1527 					       AMDGPU_XCP_FL_NONE);
1528 
1529 	return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
1530 }
1531 
1532 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
1533 						struct device_attribute *addr,
1534 						const char *buf, size_t count)
1535 {
1536 	struct drm_device *ddev = dev_get_drvdata(dev);
1537 	struct amdgpu_device *adev = drm_to_adev(ddev);
1538 	enum amdgpu_gfx_partition mode;
1539 	int ret = 0, num_xcc;
1540 
1541 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1542 	if (num_xcc % 2 != 0)
1543 		return -EINVAL;
1544 
1545 	if (!strncasecmp("SPX", buf, strlen("SPX"))) {
1546 		mode = AMDGPU_SPX_PARTITION_MODE;
1547 	} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
1548 		/*
1549 		 * DPX mode needs the number of AIDs to be a multiple of 2;
1550 		 * each AID connects 2 XCCs.
1551 		 */
1552 		if (num_xcc % 4)
1553 			return -EINVAL;
1554 		mode = AMDGPU_DPX_PARTITION_MODE;
1555 	} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
1556 		if (num_xcc != 6)
1557 			return -EINVAL;
1558 		mode = AMDGPU_TPX_PARTITION_MODE;
1559 	} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
1560 		if (num_xcc != 8)
1561 			return -EINVAL;
1562 		mode = AMDGPU_QPX_PARTITION_MODE;
1563 	} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
1564 		mode = AMDGPU_CPX_PARTITION_MODE;
1565 	} else {
1566 		return -EINVAL;
1567 	}
1568 
1569 	/* Don't allow a switch while under reset */
1570 	if (!down_read_trylock(&adev->reset_domain->sem))
1571 		return -EPERM;
1572 
1573 	ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
1574 
1575 	up_read(&adev->reset_domain->sem);
1576 
1577 	if (ret)
1578 		return ret;
1579 
1580 	return count;
1581 }
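/*
 * Usage sketch (sysfs node name assumed from the attribute this store
 * handler backs): switching an 8-XCC part to quad partitions could look
 * like
 *
 *	echo QPX > /sys/bus/pci/devices/<bdf>/current_compute_partition
 */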
1582 
1583 static const char *xcp_desc[] = {
1584 	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
1585 	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
1586 	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
1587 	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
1588 	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
1589 };
1590 
1591 static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
1592 						struct device_attribute *addr,
1593 						char *buf)
1594 {
1595 	struct drm_device *ddev = dev_get_drvdata(dev);
1596 	struct amdgpu_device *adev = drm_to_adev(ddev);
1597 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1598 	int size = 0, mode;
1599 	char *sep = "";
1600 
1601 	if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
1602 		return sysfs_emit(buf, "Not supported\n");
1603 
1604 	for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
1605 		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
1606 		sep = ", ";
1607 	}
1608 
1609 	size += sysfs_emit_at(buf, size, "\n");
1610 
1611 	return size;
1612 }
1613 
1614 static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
1615 {
1616 	struct amdgpu_device *adev = ring->adev;
1617 	struct drm_gpu_scheduler *sched = &ring->sched;
1618 	struct drm_sched_entity entity;
1619 	static atomic_t counter;
1620 	struct dma_fence *f;
1621 	struct amdgpu_job *job;
1622 	struct amdgpu_ib *ib;
1623 	void *owner;
1624 	int i, r;
1625 
1626 	/* Initialize the scheduler entity */
1627 	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
1628 				  &sched, 1, NULL);
1629 	if (r) {
1630 		dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
1631 		goto err;
1632 	}
1633 
1634 	/*
1635 	 * Use some unique dummy value as the owner to make sure we execute
1636 	 * the cleaner shader on each submission. The value just needs to change
1637 	 * for each submission and is otherwise meaningless.
1638 	 */
1639 	owner = (void *)(unsigned long)atomic_inc_return(&counter);
1640 
1641 	r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
1642 				     64, 0, &job,
1643 				     AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
1644 	if (r)
1645 		goto err;
1646 
1647 	job->enforce_isolation = true;
1648 	/* always run the cleaner shader */
1649 	job->run_cleaner_shader = true;
1650 
1651 	ib = &job->ibs[0];
1652 	for (i = 0; i <= ring->funcs->align_mask; ++i)
1653 		ib->ptr[i] = ring->funcs->nop;
1654 	ib->length_dw = ring->funcs->align_mask + 1;
1655 
1656 	f = amdgpu_job_submit(job);
1657 
1658 	r = dma_fence_wait(f, false);
1659 	if (r)
1660 		goto err;
1661 
1662 	dma_fence_put(f);
1663 
1664 	/* Clean up the scheduler entity */
1665 	drm_sched_entity_destroy(&entity);
1666 	return 0;
1667 
1668 err:
1669 	return r;
1670 }
1671 
1672 static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
1673 {
1674 	int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1675 	struct amdgpu_ring *ring;
1676 	int num_xcc_to_clear;
1677 	int i, r, xcc_id;
1678 
1679 	if (adev->gfx.num_xcc_per_xcp)
1680 		num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
1681 	else
1682 		num_xcc_to_clear = 1;
1683 
1684 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1685 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1686 			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1687 			if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
1688 				r = amdgpu_gfx_run_cleaner_shader_job(ring);
1689 				if (r)
1690 					return r;
1691 				num_xcc_to_clear--;
1692 				break;
1693 			}
1694 		}
1695 	}
1696 
1697 	if (num_xcc_to_clear)
1698 		return -ENOENT;
1699 
1700 	return 0;
1701 }
1702 
1703 /**
1704  * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
1705  * @dev: The device structure
1706  * @attr: The device attribute structure
1707  * @buf: The buffer containing the input data
1708  * @count: The size of the input data
1709  *
1710  * Provides the sysfs interface to manually run a cleaner shader, which is
1711  * used to clear the GPU state between different tasks. Writing a value to the
1712  * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
1713  * The value written corresponds to the partition index on multi-partition
1714  * devices. On single-partition devices, the value should be '0'.
1715  *
1716  * The cleaner shader clears the Local Data Store (LDS) and General Purpose
1717  * Registers (GPRs) to ensure data isolation between GPU workloads.
1718  *
1719  * Return: The number of bytes written to the sysfs file.
1720  */
1721 static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
1722 						 struct device_attribute *attr,
1723 						 const char *buf,
1724 						 size_t count)
1725 {
1726 	struct drm_device *ddev = dev_get_drvdata(dev);
1727 	struct amdgpu_device *adev = drm_to_adev(ddev);
1728 	int ret;
1729 	long value;
1730 
1731 	if (amdgpu_in_reset(adev))
1732 		return -EPERM;
1733 	if (adev->in_suspend && !adev->in_runpm)
1734 		return -EPERM;
1735 
1736 	if (adev->gfx.disable_kq)
1737 		return -EPERM;
1738 
1739 	ret = kstrtol(buf, 0, &value);
1740 
1741 	if (ret)
1742 		return -EINVAL;
1743 
1744 	if (value < 0)
1745 		return -EINVAL;
1746 
1747 	if (adev->xcp_mgr) {
1748 		if (value >= adev->xcp_mgr->num_xcps)
1749 			return -EINVAL;
1750 	} else {
1751 		if (value > 1)
1752 			return -EINVAL;
1753 	}
1754 
1755 	ret = pm_runtime_get_sync(ddev->dev);
1756 	if (ret < 0) {
1757 		pm_runtime_put_autosuspend(ddev->dev);
1758 		return ret;
1759 	}
1760 
1761 	ret = amdgpu_gfx_run_cleaner_shader(adev, value);
1762 
1763 	pm_runtime_put_autosuspend(ddev->dev);
1764 
1765 	if (ret)
1766 		return ret;
1767 
1768 	return count;
1769 }
1770 
1771 /**
1772  * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
1773  * @dev: The device structure
1774  * @attr: The device attribute structure
1775  * @buf: The buffer to store the output data
1776  *
 * Provides the sysfs read interface to get the current setting of the
 * 'enforce_isolation' feature for each GPU partition. Reading the
 * 'enforce_isolation' sysfs file returns the isolation setting for all
 * partitions, where '0' indicates disabled, '1' indicates enabled, '2'
 * indicates enabled in legacy mode, and '3' indicates enabled without the
 * cleaner shader.
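 *
 * Illustrative read on a four-partition device (the sysfs path varies per
 * card):
 *
 *   $ cat /sys/class/drm/card0/device/enforce_isolation
 *   1 0 0 0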
1782  *
 * Return: The number of bytes written to @buf.
1784  */
1785 static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
1786 						struct device_attribute *attr,
1787 						char *buf)
1788 {
1789 	struct drm_device *ddev = dev_get_drvdata(dev);
1790 	struct amdgpu_device *adev = drm_to_adev(ddev);
1791 	int i;
1792 	ssize_t size = 0;
1793 
1794 	if (adev->xcp_mgr) {
1795 		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
1796 			size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
1797 			if (i < (adev->xcp_mgr->num_xcps - 1))
1798 				size += sysfs_emit_at(buf, size, " ");
1799 		}
		size += sysfs_emit_at(buf, size, "\n");
1801 	} else {
1802 		size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
1803 	}
1804 
1805 	return size;
1806 }
1807 
1808 /**
1809  * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
1810  * @dev: The device structure
1811  * @attr: The device attribute structure
1812  * @buf: The buffer containing the input data
1813  * @count: The size of the input data
1814  *
 * This function allows control over the 'enforce_isolation' feature, which
 * serializes access to the graphics engine. Writing to the
 * 'enforce_isolation' sysfs file sets the isolation mode for each partition:
 * '0' disables isolation, '1' enables isolation with the cleaner shader,
 * '2' enables legacy isolation without the cleaner shader, and '3' enables
 * process isolation without submitting the cleaner shader. The input must
 * specify the setting for all partitions.
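 *
 * Illustrative write enabling isolation with the cleaner shader on
 * partitions 0 and 2 of a four-partition device (path varies per card):
 *
 *   echo "1 0 1 0" > /sys/class/drm/card0/device/enforce_isolation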
1822  *
 * Return: @count on success, or a negative error code on failure.
1824  */
1825 static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
1826 						struct device_attribute *attr,
1827 						const char *buf, size_t count)
1828 {
1829 	struct drm_device *ddev = dev_get_drvdata(dev);
1830 	struct amdgpu_device *adev = drm_to_adev(ddev);
1831 	long partition_values[MAX_XCP] = {0};
1832 	int ret, i, num_partitions;
1833 	const char *input_buf = buf;
1834 
1835 	for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
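	/* parse one space-separated isolation setting per partition */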
1836 		ret = sscanf(input_buf, "%ld", &partition_values[i]);
1837 		if (ret <= 0)
1838 			break;
1839 
1840 		/* Move the pointer to the next value in the string */
1841 		input_buf = strchr(input_buf, ' ');
1842 		if (input_buf) {
1843 			input_buf++;
1844 		} else {
1845 			i++;
1846 			break;
1847 		}
1848 	}
1849 	num_partitions = i;
1850 
1851 	if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
1852 		return -EINVAL;
1853 
1854 	if (!adev->xcp_mgr && num_partitions != 1)
1855 		return -EINVAL;
1856 
1857 	for (i = 0; i < num_partitions; i++) {
1858 		if (partition_values[i] != 0 &&
1859 		    partition_values[i] != 1 &&
1860 		    partition_values[i] != 2 &&
1861 		    partition_values[i] != 3)
1862 			return -EINVAL;
1863 	}
1864 
1865 	mutex_lock(&adev->enforce_isolation_mutex);
1866 	for (i = 0; i < num_partitions; i++) {
1867 		switch (partition_values[i]) {
1868 		case 0:
1869 		default:
1870 			adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
1871 			break;
1872 		case 1:
1873 			adev->enforce_isolation[i] =
1874 				AMDGPU_ENFORCE_ISOLATION_ENABLE;
1875 			break;
1876 		case 2:
1877 			adev->enforce_isolation[i] =
1878 				AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
1879 			break;
1880 		case 3:
1881 			adev->enforce_isolation[i] =
1882 				AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
1883 			break;
1884 		}
1885 	}
1886 	mutex_unlock(&adev->enforce_isolation_mutex);
1887 
1888 	amdgpu_mes_update_enforce_isolation(adev);
1889 
1890 	return count;
1891 }
1892 
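/*
 * Sysfs read callback for 'gfx_reset_mask': reports the reset types the GFX
 * block supports as a space-separated list (see amdgpu_show_reset_mask()).
 */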
1893 static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
1894 						struct device_attribute *attr,
1895 						char *buf)
1896 {
1897 	struct drm_device *ddev = dev_get_drvdata(dev);
1898 	struct amdgpu_device *adev = drm_to_adev(ddev);
1899 
1900 	if (!adev)
1901 		return -ENODEV;
1902 
1903 	return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
1904 }
1905 
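/*
 * Sysfs read callback for 'compute_reset_mask': reports the reset types the
 * compute queues support as a space-separated list.
 */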
1906 static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
1907 						struct device_attribute *attr,
1908 						char *buf)
1909 {
1910 	struct drm_device *ddev = dev_get_drvdata(dev);
1911 	struct amdgpu_device *adev = drm_to_adev(ddev);
1912 
1913 	if (!adev)
1914 		return -ENODEV;
1915 
1916 	return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
1917 }
1918 
1919 static DEVICE_ATTR(run_cleaner_shader, 0200,
1920 		   NULL, amdgpu_gfx_set_run_cleaner_shader);
1921 
1922 static DEVICE_ATTR(enforce_isolation, 0644,
1923 		   amdgpu_gfx_get_enforce_isolation,
1924 		   amdgpu_gfx_set_enforce_isolation);
1925 
1926 static DEVICE_ATTR(current_compute_partition, 0644,
1927 		   amdgpu_gfx_get_current_compute_partition,
1928 		   amdgpu_gfx_set_compute_partition);
1929 
1930 static DEVICE_ATTR(available_compute_partition, 0444,
1931 		   amdgpu_gfx_get_available_compute_partition, NULL);
1932 static DEVICE_ATTR(gfx_reset_mask, 0444,
1933 		   amdgpu_gfx_get_gfx_reset_mask, NULL);
1934 
1935 static DEVICE_ATTR(compute_reset_mask, 0444,
1936 		   amdgpu_gfx_get_compute_reset_mask, NULL);
1937 
1938 static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
1939 {
1940 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1941 	bool xcp_switch_supported;
1942 	int r;
1943 
1944 	if (!xcp_mgr)
1945 		return 0;
1946 
1947 	xcp_switch_supported =
1948 		(xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1949 
1950 	if (!xcp_switch_supported)
1951 		dev_attr_current_compute_partition.attr.mode &=
1952 			~(S_IWUSR | S_IWGRP | S_IWOTH);
1953 
1954 	r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
1955 	if (r)
1956 		return r;
1957 
1958 	if (xcp_switch_supported)
1959 		r = device_create_file(adev->dev,
1960 				       &dev_attr_available_compute_partition);
1961 
1962 	return r;
1963 }
1964 
1965 static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
1966 {
1967 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1968 	bool xcp_switch_supported;
1969 
1970 	if (!xcp_mgr)
1971 		return;
1972 
1973 	xcp_switch_supported =
1974 		(xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1975 	device_remove_file(adev->dev, &dev_attr_current_compute_partition);
1976 
1977 	if (xcp_switch_supported)
1978 		device_remove_file(adev->dev,
1979 				   &dev_attr_available_compute_partition);
1980 }
1981 
1982 static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
1983 {
1984 	int r;
1985 
1986 	r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
1987 	if (r)
1988 		return r;
1989 	if (adev->gfx.enable_cleaner_shader)
1990 		r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
1991 
1992 	return r;
1993 }
1994 
1995 static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
1996 {
1997 	device_remove_file(adev->dev, &dev_attr_enforce_isolation);
1998 	if (adev->gfx.enable_cleaner_shader)
1999 		device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
2000 }
2001 
2002 static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
2003 {
2004 	int r = 0;
2005 
2006 	if (!amdgpu_gpu_recovery)
2007 		return r;
2008 
2009 	if (adev->gfx.num_gfx_rings) {
2010 		r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
2011 		if (r)
2012 			return r;
2013 	}
2014 
2015 	if (adev->gfx.num_compute_rings) {
2016 		r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
2017 		if (r)
2018 			return r;
2019 	}
2020 
2021 	return r;
2022 }
2023 
2024 static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
2025 {
2026 	if (!amdgpu_gpu_recovery)
2027 		return;
2028 
2029 	if (adev->gfx.num_gfx_rings)
2030 		device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
2031 
2032 	if (adev->gfx.num_compute_rings)
2033 		device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
2034 }
2035 
2036 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
2037 {
2038 	int r;
2039 
2040 	r = amdgpu_gfx_sysfs_xcp_init(adev);
2041 	if (r) {
		dev_err(adev->dev, "failed to create xcp sysfs files\n");
2043 		return r;
2044 	}
2045 
2046 	r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
2047 	if (r)
		dev_err(adev->dev, "failed to create isolation sysfs files\n");
2049 
2050 	r = amdgpu_gfx_sysfs_reset_mask_init(adev);
2051 	if (r)
		dev_err(adev->dev, "failed to create reset mask sysfs files\n");
2053 
2054 	return r;
2055 }
2056 
2057 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
2058 {
2059 	if (adev->dev->kobj.sd) {
2060 		amdgpu_gfx_sysfs_xcp_fini(adev);
2061 		amdgpu_gfx_sysfs_isolation_shader_fini(adev);
2062 		amdgpu_gfx_sysfs_reset_mask_fini(adev);
2063 	}
2064 }
2065 
2066 int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
2067 				      unsigned int cleaner_shader_size)
2068 {
2069 	if (!adev->gfx.enable_cleaner_shader)
2070 		return -EOPNOTSUPP;
2071 
2072 	return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
2073 				       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
2074 				       &adev->gfx.cleaner_shader_obj,
2075 				       &adev->gfx.cleaner_shader_gpu_addr,
2076 				       (void **)&adev->gfx.cleaner_shader_cpu_ptr);
2077 }
2078 
2079 void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
2080 {
2081 	if (!adev->gfx.enable_cleaner_shader)
2082 		return;
2083 
2084 	amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
2085 			      &adev->gfx.cleaner_shader_gpu_addr,
2086 			      (void **)&adev->gfx.cleaner_shader_cpu_ptr);
2087 }
2088 
2089 void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
2090 				    unsigned int cleaner_shader_size,
2091 				    const void *cleaner_shader_ptr)
2092 {
2093 	if (!adev->gfx.enable_cleaner_shader)
2094 		return;
2095 
2096 	if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
2097 		memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
2098 			    cleaner_shader_size);
2099 }
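
/*
 * The three cleaner shader helpers above are typically wired up from an IP
 * block's sw_init callback. A sketch, assuming the IP code has already
 * filled in adev->gfx.cleaner_shader_size and adev->gfx.cleaner_shader_ptr
 * for its generation:
 *
 *	adev->gfx.enable_cleaner_shader = true;
 *	r = amdgpu_gfx_cleaner_shader_sw_init(adev,
 *					      adev->gfx.cleaner_shader_size);
 *	if (r)
 *		dev_warn(adev->dev, "Failed to initialize cleaner shader\n");
 *	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
 *				       adev->gfx.cleaner_shader_ptr);
 */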
2100 
2101 /**
2102  * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
2103  * @adev: amdgpu_device pointer
2104  * @idx: Index of the scheduler to control
2105  * @enable: Whether to enable or disable the KFD scheduler
2106  *
2107  * This function is used to control the KFD (Kernel Fusion Driver) scheduler
2108  * from the KGD. It is part of the cleaner shader feature. This function plays
2109  * a key role in enforcing process isolation on the GPU.
2110  *
 * The function uses a reference count mechanism (userq_sch_req_count) to keep
 * track of the number of requests to enable the KFD scheduler. When a request
 * to enable the KFD scheduler is made, the reference count is decremented.
 * When the reference count reaches zero, a delayed work is scheduled to
 * enforce isolation after the remaining time slice (enforce_isolation_time).
2116  *
2117  * When a request to disable the KFD scheduler is made, the function first
2118  * checks if the reference count is zero. If it is, it cancels the delayed work
2119  * for enforcing isolation and checks if the KFD scheduler is active. If the
2120  * KFD scheduler is active, it sends a request to stop the KFD scheduler and
2121  * sets the KFD scheduler state to inactive. Then, it increments the reference
2122  * count.
2123  *
 * The function is synchronized using the userq_sch_mutex to ensure that the
 * KFD scheduler state and reference count are updated atomically.
2126  *
2127  * Note: If the reference count is already zero when a request to enable the
2128  * KFD scheduler is made, it means there's an imbalance bug somewhere. The
2129  * function triggers a warning in this case.
2130  */
2131 static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
2132 				    bool enable)
2133 {
2134 	mutex_lock(&adev->gfx.userq_sch_mutex);
2135 
2136 	if (enable) {
2137 		/* If the count is already 0, it means there's an imbalance bug somewhere.
2138 		 * Note that the bug may be in a different caller than the one which triggers the
2139 		 * WARN_ON_ONCE.
2140 		 */
2141 		if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
2142 			dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
2143 			goto unlock;
2144 		}
2145 
2146 		adev->gfx.userq_sch_req_count[idx]--;
2147 
2148 		if (adev->gfx.userq_sch_req_count[idx] == 0 &&
2149 		    adev->gfx.userq_sch_inactive[idx]) {
2150 			schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
2151 					      msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
2152 		}
2153 	} else {
2154 		if (adev->gfx.userq_sch_req_count[idx] == 0) {
2155 			cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
2156 			if (!adev->gfx.userq_sch_inactive[idx]) {
2157 				amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
2158 				if (adev->kfd.init_complete)
2159 					amdgpu_amdkfd_stop_sched(adev, idx);
2160 				adev->gfx.userq_sch_inactive[idx] = true;
2161 			}
2162 		}
2163 
2164 		adev->gfx.userq_sch_req_count[idx]++;
2165 	}
2166 
2167 unlock:
2168 	mutex_unlock(&adev->gfx.userq_sch_mutex);
2169 }
2170 
2171 /**
2172  * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
2173  *
2174  * @work: work_struct.
2175  *
2176  * This function is the work handler for enforcing shader isolation on AMD GPUs.
 * It counts the number of emitted fences for each GFX and compute ring. If
 * any fences are still pending, it reschedules itself after a short (1 ms)
 * delay. If there are no fences, it signals the Kernel Fusion Driver (KFD)
 * to resume the runqueue. The function is synchronized using the
2181  * `enforce_isolation_mutex`.
2182  */
2183 void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
2184 {
2185 	struct amdgpu_isolation_work *isolation_work =
2186 		container_of(work, struct amdgpu_isolation_work, work.work);
2187 	struct amdgpu_device *adev = isolation_work->adev;
2188 	u32 i, idx, fences = 0;
2189 
2190 	if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
2191 		idx = 0;
2192 	else
2193 		idx = isolation_work->xcp_id;
2194 
2195 	if (idx >= MAX_XCP)
2196 		return;
2197 
2198 	mutex_lock(&adev->enforce_isolation_mutex);
2199 	for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
2200 		if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
2201 			fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
2202 	}
2203 	for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
2204 		if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
2205 			fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
2206 	}
2207 	if (fences) {
2208 		/* we've already had our timeslice, so let's wrap this up */
2209 		schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
2210 				      msecs_to_jiffies(1));
2211 	} else {
2212 		/* Tell KFD to resume the runqueue */
2213 		WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
2214 		WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
2215 
2216 		amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
2217 		if (adev->kfd.init_complete)
2218 			amdgpu_amdkfd_start_sched(adev, idx);
2219 		adev->gfx.userq_sch_inactive[idx] = false;
2220 	}
2221 	mutex_unlock(&adev->enforce_isolation_mutex);
2222 }
2223 
2224 /**
2225  * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
2226  * @adev: amdgpu_device pointer
2227  * @idx: Index of the GPU partition
2228  *
2229  * When kernel submissions come in, the jobs are given a time slice and once
2230  * that time slice is up, if there are KFD user queues active, kernel
2231  * submissions are blocked until KFD has had its time slice. Once the KFD time
2232  * slice is up, KFD user queues are preempted and kernel submissions are
2233  * unblocked and allowed to run again.
2234  */
2235 static void
2236 amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
2237 					  u32 idx)
2238 {
2239 	unsigned long cjiffies;
2240 	bool wait = false;
2241 
2242 	mutex_lock(&adev->enforce_isolation_mutex);
2243 	if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2244 		/* set the initial values if nothing is set */
2245 		if (!adev->gfx.enforce_isolation_jiffies[idx]) {
2246 			adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
			adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2248 		}
2249 		/* Make sure KFD gets a chance to run */
2250 		if (amdgpu_amdkfd_compute_active(adev, idx)) {
2251 			cjiffies = jiffies;
2252 			if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
2253 				cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
2254 				if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) {
2255 					/* if our time is up, let KGD work drain before scheduling more */
2256 					wait = true;
2257 					/* reset the timer period */
					adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2259 				} else {
2260 					/* set the timer period to what's left in our time slice */
2261 					adev->gfx.enforce_isolation_time[idx] =
2262 						GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
2263 				}
2264 			} else {
2265 				/* if jiffies wrap around we will just wait a little longer */
2266 				adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2267 			}
2268 		} else {
2269 			/* if there is no KFD work, then set the full slice period */
2270 			adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2271 			adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2272 		}
2273 	}
2274 	mutex_unlock(&adev->enforce_isolation_mutex);
2275 
2276 	if (wait)
2277 		msleep(GFX_SLICE_PERIOD_MS);
2278 }
2279 
2280 /**
2281  * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
2282  * @ring: Pointer to the amdgpu_ring structure
2283  *
2284  * Ring begin_use helper implementation for gfx which serializes access to the
2285  * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2286  * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2287  * each get a time slice when both are active.
2288  */
2289 void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
2290 {
2291 	struct amdgpu_device *adev = ring->adev;
2292 	u32 idx;
2293 	bool sched_work = false;
2294 
2295 	if (!adev->gfx.enable_cleaner_shader)
2296 		return;
2297 
2298 	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2299 		idx = 0;
2300 	else
2301 		idx = ring->xcp_id;
2302 
2303 	if (idx >= MAX_XCP)
2304 		return;
2305 
2306 	/* Don't submit more work until KFD has had some time */
2307 	amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
2308 
2309 	mutex_lock(&adev->enforce_isolation_mutex);
2310 	if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2311 		if (adev->kfd.init_complete)
2312 			sched_work = true;
2313 	}
2314 	mutex_unlock(&adev->enforce_isolation_mutex);
2315 
2316 	if (sched_work)
2317 		amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
2318 }
2319 
2320 /**
2321  * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
2322  * @ring: Pointer to the amdgpu_ring structure
2323  *
2324  * Ring end_use helper implementation for gfx which serializes access to the
2325  * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2326  * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2327  * each get a time slice when both are active.
2328  */
2329 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
2330 {
2331 	struct amdgpu_device *adev = ring->adev;
2332 	u32 idx;
2333 	bool sched_work = false;
2334 
2335 	if (!adev->gfx.enable_cleaner_shader)
2336 		return;
2337 
2338 	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2339 		idx = 0;
2340 	else
2341 		idx = ring->xcp_id;
2342 
2343 	if (idx >= MAX_XCP)
2344 		return;
2345 
2346 	mutex_lock(&adev->enforce_isolation_mutex);
2347 	if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2348 		if (adev->kfd.init_complete)
2349 			sched_work = true;
2350 	}
2351 	mutex_unlock(&adev->enforce_isolation_mutex);
2352 
2353 	if (sched_work)
2354 		amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
2355 }
2356 
2357 void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work)
2358 {
2359 	struct amdgpu_device *adev =
2360 		container_of(work, struct amdgpu_device, gfx.idle_work.work);
2361 	enum PP_SMC_POWER_PROFILE profile;
2362 	u32 i, fences = 0;
2363 	int r;
2364 
2365 	if (adev->gfx.num_gfx_rings)
2366 		profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
2367 	else
2368 		profile = PP_SMC_POWER_PROFILE_COMPUTE;
2369 
2370 	for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i)
2371 		fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
2372 	for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i)
2373 		fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
2374 	if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) {
2375 		mutex_lock(&adev->gfx.workload_profile_mutex);
2376 		if (adev->gfx.workload_profile_active) {
2377 			r = amdgpu_dpm_switch_power_profile(adev, profile, false);
2378 			if (r)
2379 				dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
2380 					 profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
2381 					 "fullscreen 3D" : "compute");
2382 			adev->gfx.workload_profile_active = false;
2383 		}
2384 		mutex_unlock(&adev->gfx.workload_profile_mutex);
2385 	} else {
2386 		schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
2387 	}
2388 }
2389 
2390 void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
2391 {
2392 	struct amdgpu_device *adev = ring->adev;
2393 	enum PP_SMC_POWER_PROFILE profile;
2394 	int r;
2395 
2396 	if (amdgpu_dpm_is_overdrive_enabled(adev))
2397 		return;
2398 
2399 	if (adev->gfx.num_gfx_rings)
2400 		profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
2401 	else
2402 		profile = PP_SMC_POWER_PROFILE_COMPUTE;
2403 
2404 	atomic_inc(&adev->gfx.total_submission_cnt);
2405 
2406 	cancel_delayed_work_sync(&adev->gfx.idle_work);
2407 
	/* We can safely return early here because we've cancelled the
	 * delayed work, so there is no one else to set it to false
	 * and we don't care if someone else sets it to true.
	 */
2411 	 */
2412 	if (adev->gfx.workload_profile_active)
2413 		return;
2414 
2415 	mutex_lock(&adev->gfx.workload_profile_mutex);
2416 	if (!adev->gfx.workload_profile_active) {
2417 		r = amdgpu_dpm_switch_power_profile(adev, profile, true);
2418 		if (r)
			dev_warn(adev->dev, "(%d) failed to enable %s power profile mode\n", r,
2420 				 profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
2421 				 "fullscreen 3D" : "compute");
2422 		adev->gfx.workload_profile_active = true;
2423 	}
2424 	mutex_unlock(&adev->gfx.workload_profile_mutex);
2425 }
2426 
2427 void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
2428 {
2429 	struct amdgpu_device *adev = ring->adev;
2430 
2431 	if (amdgpu_dpm_is_overdrive_enabled(adev))
2432 		return;
2433 
2434 	atomic_dec(&ring->adev->gfx.total_submission_cnt);
2435 
2436 	schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
2437 }
2438 
2439 /**
2440  * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
2441  *
 * @buffer: Output buffer that receives the CSB preamble PACKET3 setup.
 *
 * Return:
 * The next free index in @buffer.
2446  */
2447 u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
2448 {
2449 	u32 count = 0;
2450 
2451 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2452 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2453 
2454 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2455 	buffer[count++] = cpu_to_le32(0x80000000);
2456 	buffer[count++] = cpu_to_le32(0x80000000);
2457 
2458 	return count;
2459 }
2460 
2461 /**
 * amdgpu_gfx_csb_data_parser - Parse clear-state (CS) data into the CSB
 *
 * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
 * @buffer: Output buffer that receives the SET_CONTEXT_REG packets.
 * @count: Index in @buffer at which to continue writing.
 *
 * Return:
 * The next free index in @buffer.
2470  */
2471 u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
2472 {
2473 	const struct cs_section_def *sect = NULL;
2474 	const struct cs_extent_def *ext = NULL;
2475 	u32 i;
2476 
2477 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2478 		for (ext = sect->section; ext->extent != NULL; ++ext) {
2479 			if (sect->id == SECT_CONTEXT) {
2480 				buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2481 				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2482 
2483 				for (i = 0; i < ext->reg_count; i++)
2484 					buffer[count++] = cpu_to_le32(ext->extent[i]);
2485 			}
2486 		}
2487 	}
2488 
2489 	return count;
2490 }
2491 
2492 /**
 * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
 *
 * @buffer: Output buffer that receives the PACKET3 preamble end.
 * @count: Index in @buffer at which to write the preamble end.
2497  */
2498 void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
2499 {
2500 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2501 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
2502 
2503 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
2504 	buffer[count++] = cpu_to_le32(0);
2505 }
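
/*
 * The CSB helpers above are meant to be composed by an IP-specific
 * get_csb_buffer implementation. A minimal sketch, assuming @buffer was
 * allocated large enough for the clear-state data beforehand:
 *
 *	u32 count;
 *
 *	count = amdgpu_gfx_csb_preamble_start(buffer);
 *	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
 *	amdgpu_gfx_csb_preamble_end(buffer, count);
 */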
2506 
2507 /*
 * debugfs interface to enable/disable gfx job submission to specific rings.
2509  */
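/*
 * Illustrative use from user space (the debugfs path varies by DRM minor);
 * this keeps only ring 0 accepting new gfx jobs:
 *
 *   echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask
 */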
2510 #if defined(CONFIG_DEBUG_FS)
2511 static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
2512 {
2513 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2514 	u32 i;
2515 	u64 mask = 0;
2516 	struct amdgpu_ring *ring;
2517 
2518 	if (!adev)
2519 		return -ENODEV;
2520 
2521 	mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
2522 	if ((val & mask) == 0)
2523 		return -EINVAL;
2524 
2525 	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2526 		ring = &adev->gfx.gfx_ring[i];
		if (val & (1ULL << i))
2528 			ring->sched.ready = true;
2529 		else
2530 			ring->sched.ready = false;
2531 	}
	/* make the updated sched.ready flags visible to other CPUs */
	smp_wmb();
2534 	return 0;
2535 }
2536 
2537 static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
2538 {
2539 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2540 	u32 i;
2541 	u64 mask = 0;
2542 	struct amdgpu_ring *ring;
2543 
2544 	if (!adev)
2545 		return -ENODEV;
2546 	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2547 		ring = &adev->gfx.gfx_ring[i];
2548 		if (ring->sched.ready)
2549 			mask |= 1ULL << i;
2550 	}
2551 
2552 	*val = mask;
2553 	return 0;
2554 }
2555 
2556 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
2557 			 amdgpu_debugfs_gfx_sched_mask_get,
2558 			 amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
2559 
2560 #endif
2561 
2562 void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
2563 {
2564 #if defined(CONFIG_DEBUG_FS)
2565 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2566 	struct dentry *root = minor->debugfs_root;

	if (adev->gfx.num_gfx_rings <= 1)
		return;

	debugfs_create_file("amdgpu_gfx_sched_mask", 0600, root, adev,
			    &amdgpu_debugfs_gfx_sched_mask_fops);
2574 #endif
2575 }
2576 
2577 /*
 * debugfs interface to enable/disable compute job submission to specific rings.
2579  */
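/*
 * Illustrative use from user space (the debugfs path varies by DRM minor);
 * this restricts submissions to the first two compute rings:
 *
 *   echo 0x3 > /sys/kernel/debug/dri/0/amdgpu_compute_sched_mask
 */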
2580 #if defined(CONFIG_DEBUG_FS)
2581 static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
2582 {
2583 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2584 	u32 i;
2585 	u64 mask = 0;
2586 	struct amdgpu_ring *ring;
2587 
2588 	if (!adev)
2589 		return -ENODEV;
2590 
2591 	mask = (1ULL << adev->gfx.num_compute_rings) - 1;
2592 	if ((val & mask) == 0)
2593 		return -EINVAL;
2594 
2595 	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2596 		ring = &adev->gfx.compute_ring[i];
		if (val & (1ULL << i))
2598 			ring->sched.ready = true;
2599 		else
2600 			ring->sched.ready = false;
2601 	}
2602 
	/* make the updated sched.ready flags visible to other CPUs */
	smp_wmb();
2605 	return 0;
2606 }
2607 
2608 static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
2609 {
2610 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2611 	u32 i;
2612 	u64 mask = 0;
2613 	struct amdgpu_ring *ring;
2614 
2615 	if (!adev)
2616 		return -ENODEV;
2617 	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2618 		ring = &adev->gfx.compute_ring[i];
2619 		if (ring->sched.ready)
2620 			mask |= 1ULL << i;
2621 	}
2622 
2623 	*val = mask;
2624 	return 0;
2625 }
2626 
2627 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
2628 			 amdgpu_debugfs_compute_sched_mask_get,
2629 			 amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
2630 
2631 #endif
2632 
2633 void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
2634 {
2635 #if defined(CONFIG_DEBUG_FS)
2636 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2637 	struct dentry *root = minor->debugfs_root;

	if (adev->gfx.num_compute_rings <= 1)
		return;

	debugfs_create_file("amdgpu_compute_sched_mask", 0600, root, adev,
			    &amdgpu_debugfs_compute_sched_mask_fops);
2645 #endif
2646 }
2647 
2648