1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <linux/pm_runtime.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_gfx.h"
31 #include "amdgpu_rlc.h"
32 #include "amdgpu_ras.h"
33 #include "amdgpu_reset.h"
34 #include "amdgpu_xcp.h"
35 #include "amdgpu_xgmi.h"
36 #include "nvd.h"
37 
38 /* delay 0.1 second to enable gfx off feature */
39 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
40 
41 #define GFX_OFF_NO_DELAY 0
42 
43 /*
44  * GPU GFX IP block helper functions.
45  */
46 
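/*
 * Map a (mec, pipe, queue) tuple to a linear bit index in the MEC queue
 * bitmap: queues are laid out consecutively within a pipe, and pipes within
 * a MEC.  amdgpu_queue_mask_bit_to_mec_queue() below performs the reverse
 * translation.
 */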
47 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
48 				int pipe, int queue)
49 {
50 	int bit = 0;
51 
52 	bit += mec * adev->gfx.mec.num_pipe_per_mec
53 		* adev->gfx.mec.num_queue_per_pipe;
54 	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
55 	bit += queue;
56 
57 	return bit;
58 }
59 
60 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
61 				 int *mec, int *pipe, int *queue)
62 {
63 	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
64 	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
65 		% adev->gfx.mec.num_pipe_per_mec;
66 	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
67 	       / adev->gfx.mec.num_pipe_per_mec;
68 
69 }
70 
71 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
72 				     int xcc_id, int mec, int pipe, int queue)
73 {
74 	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
75 			adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
76 }
77 
78 static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
79 				      int me, int pipe, int queue)
80 {
81 	int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
82 	int bit = 0;
83 
84 	bit += me * adev->gfx.me.num_pipe_per_me
85 		* num_queue_per_pipe;
86 	bit += pipe * num_queue_per_pipe;
87 	bit += queue;
88 
89 	return bit;
90 }
91 
92 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
93 				    int me, int pipe, int queue)
94 {
95 	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
96 			adev->gfx.me.queue_bitmap);
97 }
98 
99 /**
100  * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
101  *
102  * @mask: array in which the per-shader array disable masks will be stored
103  * @max_se: number of SEs
104  * @max_sh: number of SHs
105  *
106  * The bitmask of CUs to be disabled in the shader array determined by se and
107  * sh is stored in mask[se * max_sh + sh].
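 *
 * For example, passing amdgpu.disable_cu=1.0.3,1.0.4 on the kernel command
 * line disables CUs 3 and 4 in SE 1, SH 0 (entries use the "se.sh.cu"
 * format and are comma-separated).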
108  */
109 void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
110 {
111 	unsigned int se, sh, cu;
112 	const char *p;
113 
114 	memset(mask, 0, sizeof(*mask) * max_se * max_sh);
115 
116 	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
117 		return;
118 
119 	p = amdgpu_disable_cu;
120 	for (;;) {
121 		char *next;
122 		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
123 
124 		if (ret < 3) {
125 			DRM_ERROR("amdgpu: could not parse disable_cu\n");
126 			return;
127 		}
128 
129 		if (se < max_se && sh < max_sh && cu < 16) {
130 			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
131 			mask[se * max_sh + sh] |= 1u << cu;
132 		} else {
133 			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
134 				  se, sh, cu);
135 		}
136 
137 		next = strchr(p, ',');
138 		if (!next)
139 			break;
140 		p = next + 1;
141 	}
142 }
143 
144 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
145 {
146 	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
147 }
148 
149 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
150 {
151 	if (amdgpu_compute_multipipe != -1) {
152 		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
153 			 amdgpu_compute_multipipe);
154 		return amdgpu_compute_multipipe == 1;
155 	}
156 
157 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
158 		return true;
159 
160 	/* FIXME: spreading the queues across pipes causes perf regressions
161 	 * on POLARIS11 compute workloads */
162 	if (adev->asic_type == CHIP_POLARIS11)
163 		return false;
164 
165 	return adev->gfx.mec.num_mec > 1;
166 }
167 
168 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
169 						struct amdgpu_ring *ring)
170 {
171 	int queue = ring->queue;
172 	int pipe = ring->pipe;
173 
174 	/* Policy: use pipe1 queue0 as high priority graphics queue if we
175 	 * have more than one gfx pipe.
176 	 */
177 	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
178 	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
179 		int me = ring->me;
180 		int bit;
181 
182 		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
183 		if (ring == &adev->gfx.gfx_ring[bit])
184 			return true;
185 	}
186 
187 	return false;
188 }
189 
190 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
191 					       struct amdgpu_ring *ring)
192 {
193 	/* Policy: use 1st queue as high priority compute queue if we
194 	 * have more than one compute queue.
195 	 */
196 	if (adev->gfx.num_compute_rings > 1 &&
197 	    ring == &adev->gfx.compute_ring[0])
198 		return true;
199 
200 	return false;
201 }
202 
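/*
 * Select which MEC queues the driver will own as kernel compute queues
 * (KCQs) and mark them in each XCC's queue bitmap: either spread evenly
 * across the pipes of MEC1 (multipipe policy) or packed into the first
 * pipe(s).
 */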
203 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
204 {
205 	int i, j, queue, pipe;
206 	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
207 	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
208 				     adev->gfx.mec.num_queue_per_pipe,
209 				     adev->gfx.num_compute_rings);
210 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
211 
212 	if (multipipe_policy) {
213 		/* policy: spread queues evenly across all pipes on MEC1 only;
214 		 * for multiple XCCs, just use the original policy for simplicity */
215 		for (j = 0; j < num_xcc; j++) {
216 			for (i = 0; i < max_queues_per_mec; i++) {
217 				pipe = i % adev->gfx.mec.num_pipe_per_mec;
218 				queue = (i / adev->gfx.mec.num_pipe_per_mec) %
219 					 adev->gfx.mec.num_queue_per_pipe;
220 
221 				set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
222 					adev->gfx.mec_bitmap[j].queue_bitmap);
223 			}
224 		}
225 	} else {
226 		/* policy: amdgpu owns all queues in the given pipe */
227 		for (j = 0; j < num_xcc; j++) {
228 			for (i = 0; i < max_queues_per_mec; ++i)
229 				set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
230 		}
231 	}
232 
233 	for (j = 0; j < num_xcc; j++) {
234 		dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
235 			bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
236 	}
237 }
238 
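/*
 * Select which ME queues the driver will own as kernel graphics queues
 * (KGQs), currently one queue (queue 0) per gfx pipe, and update
 * adev->gfx.num_gfx_rings to match the number of queues acquired.
 */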
239 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
240 {
241 	int i, queue, pipe;
242 	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
243 	int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
244 	int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
245 
246 	if (multipipe_policy) {
247 		/* policy: amdgpu owns the first queue per pipe at this stage
248 		 * will extend to multiple queues per pipe later */
249 		for (i = 0; i < max_queues_per_me; i++) {
250 			pipe = i % adev->gfx.me.num_pipe_per_me;
251 			queue = (i / adev->gfx.me.num_pipe_per_me) %
252 				num_queue_per_pipe;
253 
254 			set_bit(pipe * num_queue_per_pipe + queue,
255 				adev->gfx.me.queue_bitmap);
256 		}
257 	} else {
258 		for (i = 0; i < max_queues_per_me; ++i)
259 			set_bit(i, adev->gfx.me.queue_bitmap);
260 	}
261 
262 	/* update the number of active graphics rings */
263 	if (adev->gfx.num_gfx_rings)
264 		adev->gfx.num_gfx_rings =
265 			bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
266 }
267 
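/*
 * Pick an unused MEC queue for the KIQ ring, searching from the highest
 * queue bit downwards and skipping queues already reserved for KCQs.
 */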
268 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
269 				  struct amdgpu_ring *ring, int xcc_id)
270 {
271 	int queue_bit;
272 	int mec, pipe, queue;
273 
274 	queue_bit = adev->gfx.mec.num_mec
275 		    * adev->gfx.mec.num_pipe_per_mec
276 		    * adev->gfx.mec.num_queue_per_pipe;
277 
278 	while (--queue_bit >= 0) {
279 		if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
280 			continue;
281 
282 		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
283 
284 		/*
285 		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
286 		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
287 		 * can only be issued on queue 0.
288 		 */
289 		if ((mec == 1 && pipe > 1) || queue != 0)
290 			continue;
291 
292 		ring->me = mec + 1;
293 		ring->pipe = pipe;
294 		ring->queue = queue;
295 
296 		return 0;
297 	}
298 
299 	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
300 	return -EINVAL;
301 }
302 
303 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
304 {
305 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
306 	struct amdgpu_irq_src *irq = &kiq->irq;
307 	struct amdgpu_ring *ring = &kiq->ring;
308 	int r = 0;
309 
310 	spin_lock_init(&kiq->ring_lock);
311 
312 	ring->adev = NULL;
313 	ring->ring_obj = NULL;
314 	ring->use_doorbell = true;
315 	ring->xcc_id = xcc_id;
316 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
317 	ring->doorbell_index =
318 		(adev->doorbell_index.kiq +
319 		 xcc_id * adev->doorbell_index.xcc_doorbell_range)
320 		<< 1;
321 
322 	r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
323 	if (r)
324 		return r;
325 
326 	ring->eop_gpu_addr = kiq->eop_gpu_addr;
327 	ring->no_scheduler = true;
328 	snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu",
329 		 (unsigned char)xcc_id, (unsigned char)ring->me,
330 		 (unsigned char)ring->pipe, (unsigned char)ring->queue);
331 	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
332 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
333 	if (r)
334 		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
335 
336 	return r;
337 }
338 
339 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
340 {
341 	amdgpu_ring_fini(ring);
342 }
343 
344 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
345 {
346 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
347 
348 	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
349 }
350 
351 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
352 			unsigned int hpd_size, int xcc_id)
353 {
354 	int r;
355 	u32 *hpd;
356 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
357 
358 	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
359 				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
360 				    &kiq->eop_gpu_addr, (void **)&hpd);
361 	if (r) {
362 		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
363 		return r;
364 	}
365 
366 	memset(hpd, 0, hpd_size);
367 
368 	r = amdgpu_bo_reserve(kiq->eop_obj, true);
369 	if (unlikely(r != 0))
370 		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
371 	amdgpu_bo_kunmap(kiq->eop_obj);
372 	amdgpu_bo_unreserve(kiq->eop_obj);
373 
374 	return 0;
375 }
376 
377 /* create MQD for each compute/gfx queue */
378 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
379 			   unsigned int mqd_size, int xcc_id)
380 {
381 	int r, i, j;
382 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
383 	struct amdgpu_ring *ring = &kiq->ring;
384 	u32 domain = AMDGPU_GEM_DOMAIN_GTT;
385 
386 #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
387 	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
388 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
389 		domain |= AMDGPU_GEM_DOMAIN_VRAM;
390 #endif
391 
392 	/* create MQD for KIQ */
393 	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
394 		/* Originally the KIQ MQD was put in the GTT domain, but for SRIOV the VRAM domain
395 		 * is a must; otherwise the hypervisor triggers a SAVE_VF failure after the driver
396 		 * is unloaded, since the MQD has been deallocated and GART-unbound. To avoid that
397 		 * divergence, use the VRAM domain for the KIQ MQD on both SRIOV and bare-metal.
398 		 */
399 		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
400 					    AMDGPU_GEM_DOMAIN_VRAM |
401 					    AMDGPU_GEM_DOMAIN_GTT,
402 					    &ring->mqd_obj,
403 					    &ring->mqd_gpu_addr,
404 					    &ring->mqd_ptr);
405 		if (r) {
406 			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
407 			return r;
408 		}
409 
410 		/* prepare MQD backup */
411 		kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
412 		if (!kiq->mqd_backup) {
413 			dev_warn(adev->dev,
414 				 "no memory to create MQD backup for ring %s\n", ring->name);
415 			return -ENOMEM;
416 		}
417 	}
418 
419 	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
420 		/* create MQD for each KGQ */
421 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
422 			ring = &adev->gfx.gfx_ring[i];
423 			if (!ring->mqd_obj) {
424 				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
425 							    domain, &ring->mqd_obj,
426 							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
427 				if (r) {
428 					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
429 					return r;
430 				}
431 
432 				ring->mqd_size = mqd_size;
433 				/* prepare MQD backup */
434 				adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
435 				if (!adev->gfx.me.mqd_backup[i]) {
436 					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
437 					return -ENOMEM;
438 				}
439 			}
440 		}
441 	}
442 
443 	/* create MQD for each KCQ */
444 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
445 		j = i + xcc_id * adev->gfx.num_compute_rings;
446 		ring = &adev->gfx.compute_ring[j];
447 		if (!ring->mqd_obj) {
448 			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
449 						    domain, &ring->mqd_obj,
450 						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
451 			if (r) {
452 				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
453 				return r;
454 			}
455 
456 			ring->mqd_size = mqd_size;
457 			/* prepare MQD backup */
458 			adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL);
459 			if (!adev->gfx.mec.mqd_backup[j]) {
460 				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
461 				return -ENOMEM;
462 			}
463 		}
464 	}
465 
466 	return 0;
467 }
468 
469 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
470 {
471 	struct amdgpu_ring *ring = NULL;
472 	int i, j;
473 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
474 
475 	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
476 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
477 			ring = &adev->gfx.gfx_ring[i];
478 			kfree(adev->gfx.me.mqd_backup[i]);
479 			amdgpu_bo_free_kernel(&ring->mqd_obj,
480 					      &ring->mqd_gpu_addr,
481 					      &ring->mqd_ptr);
482 		}
483 	}
484 
485 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
486 		j = i + xcc_id * adev->gfx.num_compute_rings;
487 		ring = &adev->gfx.compute_ring[j];
488 		kfree(adev->gfx.mec.mqd_backup[j]);
489 		amdgpu_bo_free_kernel(&ring->mqd_obj,
490 				      &ring->mqd_gpu_addr,
491 				      &ring->mqd_ptr);
492 	}
493 
494 	ring = &kiq->ring;
495 	kfree(kiq->mqd_backup);
496 	amdgpu_bo_free_kernel(&ring->mqd_obj,
497 			      &ring->mqd_gpu_addr,
498 			      &ring->mqd_ptr);
499 }
500 
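/*
 * Unmap all kernel compute queues of the given XCC, either through MES or
 * by submitting UNMAP_QUEUES packets on the KIQ ring.
 */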
501 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
502 {
503 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
504 	struct amdgpu_ring *kiq_ring = &kiq->ring;
505 	int i, r = 0;
506 	int j;
507 
508 	if (adev->enable_mes) {
509 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
510 			j = i + xcc_id * adev->gfx.num_compute_rings;
511 			amdgpu_mes_unmap_legacy_queue(adev,
512 						   &adev->gfx.compute_ring[j],
513 						   RESET_QUEUES, 0, 0);
514 		}
515 		return 0;
516 	}
517 
518 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
519 		return -EINVAL;
520 
521 	if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
522 		return 0;
523 
524 	spin_lock(&kiq->ring_lock);
525 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
526 					adev->gfx.num_compute_rings)) {
527 		spin_unlock(&kiq->ring_lock);
528 		return -ENOMEM;
529 	}
530 
531 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
532 		j = i + xcc_id * adev->gfx.num_compute_rings;
533 		kiq->pmf->kiq_unmap_queues(kiq_ring,
534 					   &adev->gfx.compute_ring[j],
535 					   RESET_QUEUES, 0, 0);
536 	}
537 	/* Submit unmap queue packet */
538 	amdgpu_ring_commit(kiq_ring);
539 	/*
540 	 * Ring test will do a basic scratch register change check. Just run
541 	 * this to ensure that the unmap queues packet submitted above gets
542 	 * processed successfully before returning.
543 	 */
544 	r = amdgpu_ring_test_helper(kiq_ring);
545 
546 	spin_unlock(&kiq->ring_lock);
547 
548 	return r;
549 }
550 
551 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
552 {
553 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
554 	struct amdgpu_ring *kiq_ring = &kiq->ring;
555 	int i, r = 0;
556 	int j;
557 
558 	if (adev->enable_mes) {
559 		if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
560 			for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
561 				j = i + xcc_id * adev->gfx.num_gfx_rings;
562 				amdgpu_mes_unmap_legacy_queue(adev,
563 						      &adev->gfx.gfx_ring[j],
564 						      PREEMPT_QUEUES, 0, 0);
565 			}
566 		}
567 		return 0;
568 	}
569 
570 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
571 		return -EINVAL;
572 
573 	if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
574 		return 0;
575 
576 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
577 		spin_lock(&kiq->ring_lock);
578 		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
579 						adev->gfx.num_gfx_rings)) {
580 			spin_unlock(&kiq->ring_lock);
581 			return -ENOMEM;
582 		}
583 
584 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
585 			j = i + xcc_id * adev->gfx.num_gfx_rings;
586 			kiq->pmf->kiq_unmap_queues(kiq_ring,
587 						   &adev->gfx.gfx_ring[j],
588 						   PREEMPT_QUEUES, 0, 0);
589 		}
590 		/* Submit unmap queue packet */
591 		amdgpu_ring_commit(kiq_ring);
592 
593 		/*
594 		 * Ring test will do a basic scratch register change check.
595 		 * Just run this to ensure that the unmap queues packet submitted
596 		 * above gets processed successfully before returning.
597 		 */
598 		r = amdgpu_ring_test_helper(kiq_ring);
599 		spin_unlock(&kiq->ring_lock);
600 	}
601 
602 	return r;
603 }
604 
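/*
 * Convert a driver queue-bitmap bit into the bit position expected by the
 * KIQ SET_RESOURCES packet, which assumes 4 pipes per MEC and 8 queues per
 * pipe.
 */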
605 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
606 					int queue_bit)
607 {
608 	int mec, pipe, queue;
609 	int set_resource_bit = 0;
610 
611 	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
612 
613 	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
614 
615 	return set_resource_bit;
616 }
617 
618 static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
619 {
620 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
621 	struct amdgpu_ring *kiq_ring = &kiq->ring;
622 	uint64_t queue_mask = ~0ULL;
623 	int r, i, j;
624 
625 	amdgpu_device_flush_hdp(adev, NULL);
626 
627 	if (!adev->enable_uni_mes) {
628 		spin_lock(&kiq->ring_lock);
629 		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
630 		if (r) {
631 			dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
632 			spin_unlock(&kiq->ring_lock);
633 			return r;
634 		}
635 
636 		kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
637 		r = amdgpu_ring_test_helper(kiq_ring);
638 		spin_unlock(&kiq->ring_lock);
639 		if (r)
640 			dev_err(adev->dev, "KIQ failed to set resources\n");
641 	}
642 
643 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
644 		j = i + xcc_id * adev->gfx.num_compute_rings;
645 		r = amdgpu_mes_map_legacy_queue(adev,
646 						&adev->gfx.compute_ring[j]);
647 		if (r) {
648 			dev_err(adev->dev, "failed to map compute queue\n");
649 			return r;
650 		}
651 	}
652 
653 	return 0;
654 }
655 
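/*
 * Map all kernel compute queues of the given XCC: hand the usable HW queues
 * to the KIQ via SET_RESOURCES and submit MAP_QUEUES packets for each KCQ,
 * or let MES map the legacy queues when that path is enabled.
 */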
656 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
657 {
658 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
659 	struct amdgpu_ring *kiq_ring = &kiq->ring;
660 	uint64_t queue_mask = 0;
661 	int r, i, j;
662 
663 	if (adev->mes.enable_legacy_queue_map)
664 		return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
665 
666 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
667 		return -EINVAL;
668 
669 	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
670 		if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
671 			continue;
672 
673 		/* This situation may be hit in the future if a new HW
674 		 * generation exposes more than 64 queues. If so, the
675 		 * definition of queue_mask needs updating */
676 		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
677 			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
678 			break;
679 		}
680 
681 		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
682 	}
683 
684 	amdgpu_device_flush_hdp(adev, NULL);
685 
686 	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
687 		 kiq_ring->queue);
688 
689 	spin_lock(&kiq->ring_lock);
690 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
691 					adev->gfx.num_compute_rings +
692 					kiq->pmf->set_resources_size);
693 	if (r) {
694 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
695 		spin_unlock(&kiq->ring_lock);
696 		return r;
697 	}
698 
699 	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
700 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
701 		j = i + xcc_id * adev->gfx.num_compute_rings;
702 		kiq->pmf->kiq_map_queues(kiq_ring,
703 					 &adev->gfx.compute_ring[j]);
704 	}
705 	/* Submit map queue packet */
706 	amdgpu_ring_commit(kiq_ring);
707 	/*
708 	 * Ring test will do a basic scratch register change check. Just run
709 	 * this to ensure that the map queues packet submitted above gets
710 	 * processed successfully before returning.
711 	 */
712 	r = amdgpu_ring_test_helper(kiq_ring);
713 	spin_unlock(&kiq->ring_lock);
714 	if (r)
715 		DRM_ERROR("KCQ enable failed\n");
716 
717 	return r;
718 }
719 
720 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
721 {
722 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
723 	struct amdgpu_ring *kiq_ring = &kiq->ring;
724 	int r, i, j;
725 
726 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
727 		return -EINVAL;
728 
729 	amdgpu_device_flush_hdp(adev, NULL);
730 
731 	if (adev->mes.enable_legacy_queue_map) {
732 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
733 			j = i + xcc_id * adev->gfx.num_gfx_rings;
734 			r = amdgpu_mes_map_legacy_queue(adev,
735 							&adev->gfx.gfx_ring[j]);
736 			if (r) {
737 				DRM_ERROR("failed to map gfx queue\n");
738 				return r;
739 			}
740 		}
741 
742 		return 0;
743 	}
744 
745 	spin_lock(&kiq->ring_lock);
746 	/* No need to map kgq on the slave */
747 	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
748 		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
749 						adev->gfx.num_gfx_rings);
750 		if (r) {
751 			DRM_ERROR("Failed to lock KIQ (%d).\n", r);
752 			spin_unlock(&kiq->ring_lock);
753 			return r;
754 		}
755 
756 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
757 			j = i + xcc_id * adev->gfx.num_gfx_rings;
758 			kiq->pmf->kiq_map_queues(kiq_ring,
759 						 &adev->gfx.gfx_ring[j]);
760 		}
761 	}
762 	/* Submit map queue packet */
763 	amdgpu_ring_commit(kiq_ring);
764 	/*
765 	 * Ring test will do a basic scratch register change check. Just run
766 	 * this to ensure that the map queues packet submitted above gets
767 	 * processed successfully before returning.
768 	 */
769 	r = amdgpu_ring_test_helper(kiq_ring);
770 	spin_unlock(&kiq->ring_lock);
771 	if (r)
772 		DRM_ERROR("KGQ enable failed\n");
773 
774 	return r;
775 }
776 
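/*
 * Common GFXOFF control: "enable" drops one disable request and, once the
 * request count reaches zero, allows GFXOFF either immediately or after a
 * delay; "disable" cancels any pending allow work, turns GFXOFF off if
 * needed and takes a disable request.
 */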
777 static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable,
778 				   bool no_delay)
779 {
780 	unsigned long delay = GFX_OFF_DELAY_ENABLE;
781 
782 	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
783 		return;
784 
785 	mutex_lock(&adev->gfx.gfx_off_mutex);
786 
787 	if (enable) {
788 		/* If the count is already 0, it means there's an imbalance bug somewhere.
789 		 * Note that the bug may be in a different caller than the one which triggers the
790 		 * WARN_ON_ONCE.
791 		 */
792 		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
793 			goto unlock;
794 
795 		adev->gfx.gfx_off_req_count--;
796 
797 		if (adev->gfx.gfx_off_req_count == 0 &&
798 		    !adev->gfx.gfx_off_state) {
799 			/* If no delay is requested (e.g. going to s2idle), don't wait */
800 			if (no_delay) {
801 				if (!amdgpu_dpm_set_powergating_by_smu(adev,
802 						AMD_IP_BLOCK_TYPE_GFX, true, 0))
803 					adev->gfx.gfx_off_state = true;
804 			} else {
805 				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
806 					      delay);
807 			}
808 		}
809 	} else {
810 		if (adev->gfx.gfx_off_req_count == 0) {
811 			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
812 
813 			if (adev->gfx.gfx_off_state &&
814 			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
815 				adev->gfx.gfx_off_state = false;
816 
817 				if (adev->gfx.funcs->init_spm_golden) {
818 					dev_dbg(adev->dev,
819 						"GFXOFF is disabled, re-init SPM golden settings\n");
820 					amdgpu_gfx_init_spm_golden(adev);
821 				}
822 			}
823 		}
824 
825 		adev->gfx.gfx_off_req_count++;
826 	}
827 
828 unlock:
829 	mutex_unlock(&adev->gfx.gfx_off_mutex);
830 }
831 
832 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
833  *
834  * @adev: amdgpu_device pointer
835  * @enable: true to enable the gfx off feature, false to disable it
836  *
837  * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
838  * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
839  * 3. Other clients can cancel their request to disable the gfx off feature.
840  * 4. Other clients should not request enabling the gfx off feature before having requested to disable it.
841  *
842  * Allowing gfx off is delayed by GFX_OFF_DELAY_ENABLE (100 ms).
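 *
 * A typical caller brackets direct hardware access with a disable/enable
 * pair: amdgpu_gfx_off_ctrl(adev, false); ... amdgpu_gfx_off_ctrl(adev, true);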
843  */
844 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
845 {
846 	/* If going to s2idle, no need to wait */
847 	bool no_delay = adev->in_s0ix ? true : false;
848 
849 	amdgpu_gfx_do_off_ctrl(adev, enable, no_delay);
850 }
851 
852 /* amdgpu_gfx_off_ctrl_immediate - Handle gfx off feature enable/disable
853  *
854  * @adev: amdgpu_device pointer
855  * @enable: true to enable the gfx off feature, false to disable it
856  *
857  * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
858  * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
859  * 3. Other clients can cancel their request to disable the gfx off feature.
860  * 4. Other clients should not request enabling the gfx off feature before having requested to disable it.
861  *
862  * Allowing gfx off is issued immediately, without the GFX_OFF_DELAY_ENABLE delay.
863  */
864 void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable)
865 {
866 	amdgpu_gfx_do_off_ctrl(adev, enable, true);
867 }
868 
869 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
870 {
871 	int r = 0;
872 
873 	mutex_lock(&adev->gfx.gfx_off_mutex);
874 
875 	r = amdgpu_dpm_set_residency_gfxoff(adev, value);
876 
877 	mutex_unlock(&adev->gfx.gfx_off_mutex);
878 
879 	return r;
880 }
881 
882 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
883 {
884 	int r = 0;
885 
886 	mutex_lock(&adev->gfx.gfx_off_mutex);
887 
888 	r = amdgpu_dpm_get_residency_gfxoff(adev, value);
889 
890 	mutex_unlock(&adev->gfx.gfx_off_mutex);
891 
892 	return r;
893 }
894 
895 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
896 {
897 	int r = 0;
898 
899 	mutex_lock(&adev->gfx.gfx_off_mutex);
900 
901 	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
902 
903 	mutex_unlock(&adev->gfx.gfx_off_mutex);
904 
905 	return r;
906 }
907 
908 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
909 {
910 
911 	int r = 0;
912 
913 	mutex_lock(&adev->gfx.gfx_off_mutex);
914 
915 	r = amdgpu_dpm_get_status_gfxoff(adev, value);
916 
917 	mutex_unlock(&adev->gfx.gfx_off_mutex);
918 
919 	return r;
920 }
921 
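/*
 * Default late RAS initialization for GFX: reset the error status when
 * persistent EDC harvesting is not supported, perform the common RAS block
 * late init and, except under SR-IOV, enable the CP ECC error interrupt.
 */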
922 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
923 {
924 	int r;
925 
926 	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
927 		if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
928 			r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
929 			if (r)
930 				return r;
931 		}
932 
933 		r = amdgpu_ras_block_late_init(adev, ras_block);
934 		if (r)
935 			return r;
936 
937 		if (amdgpu_sriov_vf(adev))
938 			return r;
939 
940 		if (adev->gfx.cp_ecc_error_irq.funcs) {
941 			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
942 			if (r)
943 				goto late_fini;
944 		}
945 	} else {
946 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
947 	}
948 
949 	return 0;
950 late_fini:
951 	amdgpu_ras_block_late_fini(adev, ras_block);
952 	return r;
953 }
954 
955 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
956 {
957 	int err = 0;
958 	struct amdgpu_gfx_ras *ras = NULL;
959 
960 	/* If adev->gfx.ras is NULL, gfx does not support the
961 	 * RAS function, so do nothing here.
962 	 */
963 	if (!adev->gfx.ras)
964 		return 0;
965 
966 	ras = adev->gfx.ras;
967 
968 	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
969 	if (err) {
970 		dev_err(adev->dev, "Failed to register gfx ras block!\n");
971 		return err;
972 	}
973 
974 	strcpy(ras->ras_block.ras_comm.name, "gfx");
975 	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
976 	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
977 	adev->gfx.ras_if = &ras->ras_block.ras_comm;
978 
979 	/* If no special ras_late_init function is defined, use the gfx default ras_late_init */
980 	if (!ras->ras_block.ras_late_init)
981 		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
982 
983 	/* If no special ras_cb function is defined, use the default ras_cb */
984 	if (!ras->ras_block.ras_cb)
985 		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
986 
987 	return 0;
988 }
989 
990 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
991 						struct amdgpu_iv_entry *entry)
992 {
993 	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
994 		return adev->gfx.ras->poison_consumption_handler(adev, entry);
995 
996 	return 0;
997 }
998 
999 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
1000 		void *err_data,
1001 		struct amdgpu_iv_entry *entry)
1002 {
1003 	/* TODO: a UE will trigger an interrupt.
1004 	 *
1005 	 * When "Full RAS" is enabled, the per-IP interrupt sources should
1006 	 * be disabled and the driver should only look for the aggregated
1007 	 * interrupt via sync flood
1008 	 */
1009 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
1010 		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
1011 		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
1012 		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
1013 			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1014 		amdgpu_ras_reset_gpu(adev);
1015 	}
1016 	return AMDGPU_RAS_SUCCESS;
1017 }
1018 
1019 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
1020 				  struct amdgpu_irq_src *source,
1021 				  struct amdgpu_iv_entry *entry)
1022 {
1023 	struct ras_common_if *ras_if = adev->gfx.ras_if;
1024 	struct ras_dispatch_if ih_data = {
1025 		.entry = entry,
1026 	};
1027 
1028 	if (!ras_if)
1029 		return 0;
1030 
1031 	ih_data.head = *ras_if;
1032 
1033 	DRM_ERROR("CP ECC ERROR IRQ\n");
1034 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1035 	return 0;
1036 }
1037 
1038 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
1039 		void *ras_error_status,
1040 		void (*func)(struct amdgpu_device *adev, void *ras_error_status,
1041 				int xcc_id))
1042 {
1043 	int i;
1044 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
1045 	uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
1046 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
1047 
1048 	if (err_data) {
1049 		err_data->ue_count = 0;
1050 		err_data->ce_count = 0;
1051 	}
1052 
1053 	for_each_inst(i, xcc_mask)
1054 		func(adev, ras_error_status, i);
1055 }
1056 
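/*
 * Read a register by submitting a read packet on the KIQ ring (or via MES
 * when its ring is ready) and polling a fence; the value is returned
 * through a writeback slot.
 */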
1057 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
1058 {
1059 	signed long r, cnt = 0;
1060 	unsigned long flags;
1061 	uint32_t seq, reg_val_offs = 0, value = 0;
1062 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1063 	struct amdgpu_ring *ring = &kiq->ring;
1064 
1065 	if (amdgpu_device_skip_hw_access(adev))
1066 		return 0;
1067 
1068 	if (adev->mes.ring[0].sched.ready)
1069 		return amdgpu_mes_rreg(adev, reg);
1070 
1071 	BUG_ON(!ring->funcs->emit_rreg);
1072 
1073 	spin_lock_irqsave(&kiq->ring_lock, flags);
1074 	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
1075 		pr_err("critical bug! too many kiq readers\n");
1076 		goto failed_unlock;
1077 	}
1078 	r = amdgpu_ring_alloc(ring, 32);
1079 	if (r)
1080 		goto failed_unlock;
1081 
1082 	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
1083 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1084 	if (r)
1085 		goto failed_undo;
1086 
1087 	amdgpu_ring_commit(ring);
1088 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1089 
1090 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1091 
1092 	/* Don't wait any longer in the GPU reset case, because doing so may
1093 	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1094 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
1095 	 * never return if we keep waiting in virt_kiq_rreg, which causes
1096 	 * gpu_recover() to hang there.
1097 	 *
1098 	 * Also don't wait any longer in IRQ context.
1099 	 */
1100 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1101 		goto failed_kiq_read;
1102 
1103 	might_sleep();
1104 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1105 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1106 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1107 	}
1108 
1109 	if (cnt > MAX_KIQ_REG_TRY)
1110 		goto failed_kiq_read;
1111 
1112 	mb();
1113 	value = adev->wb.wb[reg_val_offs];
1114 	amdgpu_device_wb_free(adev, reg_val_offs);
1115 	return value;
1116 
1117 failed_undo:
1118 	amdgpu_ring_undo(ring);
1119 failed_unlock:
1120 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1121 failed_kiq_read:
1122 	if (reg_val_offs)
1123 		amdgpu_device_wb_free(adev, reg_val_offs);
1124 	dev_err(adev->dev, "failed to read reg:%x\n", reg);
1125 	return ~0;
1126 }
1127 
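/*
 * Write a register by submitting a write packet on the KIQ ring (or via
 * MES when its ring is ready) and polling a fence to make sure the write
 * has been processed.
 */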
1128 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
1129 {
1130 	signed long r, cnt = 0;
1131 	unsigned long flags;
1132 	uint32_t seq;
1133 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
1134 	struct amdgpu_ring *ring = &kiq->ring;
1135 
1136 	BUG_ON(!ring->funcs->emit_wreg);
1137 
1138 	if (amdgpu_device_skip_hw_access(adev))
1139 		return;
1140 
1141 	if (adev->mes.ring[0].sched.ready) {
1142 		amdgpu_mes_wreg(adev, reg, v);
1143 		return;
1144 	}
1145 
1146 	spin_lock_irqsave(&kiq->ring_lock, flags);
1147 	r = amdgpu_ring_alloc(ring, 32);
1148 	if (r)
1149 		goto failed_unlock;
1150 
1151 	amdgpu_ring_emit_wreg(ring, reg, v);
1152 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1153 	if (r)
1154 		goto failed_undo;
1155 
1156 	amdgpu_ring_commit(ring);
1157 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1158 
1159 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1160 
1161 	/* Don't wait any longer in the GPU reset case, because doing so may
1162 	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
1163 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
1164 	 * never return if we keep waiting in virt_kiq_rreg, which causes
1165 	 * gpu_recover() to hang there.
1166 	 *
1167 	 * Also don't wait any longer in IRQ context.
1168 	 */
1169 	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1170 		goto failed_kiq_write;
1171 
1172 	might_sleep();
1173 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1175 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1176 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1177 	}
1178 
1179 	if (cnt > MAX_KIQ_REG_TRY)
1180 		goto failed_kiq_write;
1181 
1182 	return;
1183 
1184 failed_undo:
1185 	amdgpu_ring_undo(ring);
1186 failed_unlock:
1187 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
1188 failed_kiq_write:
1189 	dev_err(adev->dev, "failed to write reg:%x\n", reg);
1190 }
1191 
1192 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
1193 {
1194 	if (amdgpu_num_kcq == -1) {
1195 		return 8;
1196 	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1197 		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1198 		return 8;
1199 	}
1200 	return amdgpu_num_kcq;
1201 }
1202 
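/*
 * Record the CP firmware version/feature information for the given ucode id
 * and, when PSP front-door loading is used, register the firmware so its
 * size is accounted for in adev->firmware.fw_size.
 */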
1203 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
1204 				  uint32_t ucode_id)
1205 {
1206 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1207 	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
1208 	struct amdgpu_firmware_info *info = NULL;
1209 	const struct firmware *ucode_fw;
1210 	unsigned int fw_size;
1211 
1212 	switch (ucode_id) {
1213 	case AMDGPU_UCODE_ID_CP_PFP:
1214 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1215 			adev->gfx.pfp_fw->data;
1216 		adev->gfx.pfp_fw_version =
1217 			le32_to_cpu(cp_hdr->header.ucode_version);
1218 		adev->gfx.pfp_feature_version =
1219 			le32_to_cpu(cp_hdr->ucode_feature_version);
1220 		ucode_fw = adev->gfx.pfp_fw;
1221 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1222 		break;
1223 	case AMDGPU_UCODE_ID_CP_RS64_PFP:
1224 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1225 			adev->gfx.pfp_fw->data;
1226 		adev->gfx.pfp_fw_version =
1227 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1228 		adev->gfx.pfp_feature_version =
1229 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1230 		ucode_fw = adev->gfx.pfp_fw;
1231 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1232 		break;
1233 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
1234 	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1235 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1236 			adev->gfx.pfp_fw->data;
1237 		ucode_fw = adev->gfx.pfp_fw;
1238 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1239 		break;
1240 	case AMDGPU_UCODE_ID_CP_ME:
1241 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1242 			adev->gfx.me_fw->data;
1243 		adev->gfx.me_fw_version =
1244 			le32_to_cpu(cp_hdr->header.ucode_version);
1245 		adev->gfx.me_feature_version =
1246 			le32_to_cpu(cp_hdr->ucode_feature_version);
1247 		ucode_fw = adev->gfx.me_fw;
1248 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1249 		break;
1250 	case AMDGPU_UCODE_ID_CP_RS64_ME:
1251 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1252 			adev->gfx.me_fw->data;
1253 		adev->gfx.me_fw_version =
1254 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1255 		adev->gfx.me_feature_version =
1256 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1257 		ucode_fw = adev->gfx.me_fw;
1258 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1259 		break;
1260 	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1261 	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1262 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1263 			adev->gfx.me_fw->data;
1264 		ucode_fw = adev->gfx.me_fw;
1265 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1266 		break;
1267 	case AMDGPU_UCODE_ID_CP_CE:
1268 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1269 			adev->gfx.ce_fw->data;
1270 		adev->gfx.ce_fw_version =
1271 			le32_to_cpu(cp_hdr->header.ucode_version);
1272 		adev->gfx.ce_feature_version =
1273 			le32_to_cpu(cp_hdr->ucode_feature_version);
1274 		ucode_fw = adev->gfx.ce_fw;
1275 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1276 		break;
1277 	case AMDGPU_UCODE_ID_CP_MEC1:
1278 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1279 			adev->gfx.mec_fw->data;
1280 		adev->gfx.mec_fw_version =
1281 			le32_to_cpu(cp_hdr->header.ucode_version);
1282 		adev->gfx.mec_feature_version =
1283 			le32_to_cpu(cp_hdr->ucode_feature_version);
1284 		ucode_fw = adev->gfx.mec_fw;
1285 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1286 			  le32_to_cpu(cp_hdr->jt_size) * 4;
1287 		break;
1288 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
1289 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1290 			adev->gfx.mec_fw->data;
1291 		ucode_fw = adev->gfx.mec_fw;
1292 		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1293 		break;
1294 	case AMDGPU_UCODE_ID_CP_MEC2:
1295 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1296 			adev->gfx.mec2_fw->data;
1297 		adev->gfx.mec2_fw_version =
1298 			le32_to_cpu(cp_hdr->header.ucode_version);
1299 		adev->gfx.mec2_feature_version =
1300 			le32_to_cpu(cp_hdr->ucode_feature_version);
1301 		ucode_fw = adev->gfx.mec2_fw;
1302 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1303 			  le32_to_cpu(cp_hdr->jt_size) * 4;
1304 		break;
1305 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
1306 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1307 			adev->gfx.mec2_fw->data;
1308 		ucode_fw = adev->gfx.mec2_fw;
1309 		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1310 		break;
1311 	case AMDGPU_UCODE_ID_CP_RS64_MEC:
1312 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1313 			adev->gfx.mec_fw->data;
1314 		adev->gfx.mec_fw_version =
1315 			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1316 		adev->gfx.mec_feature_version =
1317 			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1318 		ucode_fw = adev->gfx.mec_fw;
1319 		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1320 		break;
1321 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1322 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1323 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1324 	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1325 		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1326 			adev->gfx.mec_fw->data;
1327 		ucode_fw = adev->gfx.mec_fw;
1328 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1329 		break;
1330 	default:
1331 		dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
1332 		return;
1333 	}
1334 
1335 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1336 		info = &adev->firmware.ucode[ucode_id];
1337 		info->ucode_id = ucode_id;
1338 		info->fw = ucode_fw;
1339 		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
1340 	}
1341 }
1342 
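/*
 * An XCC is the master of its partition when it is the first XCC of the
 * XCP, i.e. its index is a multiple of num_xcc_per_xcp.
 */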
1343 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
1344 {
1345 	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
1346 			adev->gfx.num_xcc_per_xcp : 1));
1347 }
1348 
1349 static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
1350 						struct device_attribute *addr,
1351 						char *buf)
1352 {
1353 	struct drm_device *ddev = dev_get_drvdata(dev);
1354 	struct amdgpu_device *adev = drm_to_adev(ddev);
1355 	int mode;
1356 
1357 	/* Only minimal precaution taken to reject requests while in reset. */
1358 	if (amdgpu_in_reset(adev))
1359 		return -EPERM;
1360 
1361 	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1362 					       AMDGPU_XCP_FL_NONE);
1363 
1364 	return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
1365 }
1366 
1367 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
1368 						struct device_attribute *addr,
1369 						const char *buf, size_t count)
1370 {
1371 	struct drm_device *ddev = dev_get_drvdata(dev);
1372 	struct amdgpu_device *adev = drm_to_adev(ddev);
1373 	enum amdgpu_gfx_partition mode;
1374 	int ret = 0, num_xcc;
1375 
1376 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1377 	if (num_xcc % 2 != 0)
1378 		return -EINVAL;
1379 
1380 	if (!strncasecmp("SPX", buf, strlen("SPX"))) {
1381 		mode = AMDGPU_SPX_PARTITION_MODE;
1382 	} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
1383 		/*
1384 		 * DPX mode needs the number of AIDs to be a multiple of 2.
1385 		 * Each AID connects 2 XCCs.
1386 		 */
1387 		if (num_xcc % 4)
1388 			return -EINVAL;
1389 		mode = AMDGPU_DPX_PARTITION_MODE;
1390 	} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
1391 		if (num_xcc != 6)
1392 			return -EINVAL;
1393 		mode = AMDGPU_TPX_PARTITION_MODE;
1394 	} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
1395 		if (num_xcc != 8)
1396 			return -EINVAL;
1397 		mode = AMDGPU_QPX_PARTITION_MODE;
1398 	} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
1399 		mode = AMDGPU_CPX_PARTITION_MODE;
1400 	} else {
1401 		return -EINVAL;
1402 	}
1403 
1404 	/* Don't allow a switch while under reset */
1405 	if (!down_read_trylock(&adev->reset_domain->sem))
1406 		return -EPERM;
1407 
1408 	ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
1409 
1410 	up_read(&adev->reset_domain->sem);
1411 
1412 	if (ret)
1413 		return ret;
1414 
1415 	return count;
1416 }
1417 
1418 static const char *xcp_desc[] = {
1419 	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
1420 	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
1421 	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
1422 	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
1423 	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
1424 };
1425 
1426 static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
1427 						struct device_attribute *addr,
1428 						char *buf)
1429 {
1430 	struct drm_device *ddev = dev_get_drvdata(dev);
1431 	struct amdgpu_device *adev = drm_to_adev(ddev);
1432 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1433 	int size = 0, mode;
1434 	char *sep = "";
1435 
1436 	if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
1437 		return sysfs_emit(buf, "Not supported\n");
1438 
1439 	for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
1440 		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
1441 		sep = ", ";
1442 	}
1443 
1444 	size += sysfs_emit_at(buf, size, "\n");
1445 
1446 	return size;
1447 }
1448 
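/*
 * Submit a NOP-padded IB on the given ring with run_cleaner_shader set, so
 * that the ring emits the cleaner shader, and wait for the job to complete.
 */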
1449 static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
1450 {
1451 	struct amdgpu_device *adev = ring->adev;
1452 	struct drm_gpu_scheduler *sched = &ring->sched;
1453 	struct drm_sched_entity entity;
1454 	static atomic_t counter;
1455 	struct dma_fence *f;
1456 	struct amdgpu_job *job;
1457 	struct amdgpu_ib *ib;
1458 	void *owner;
1459 	int i, r;
1460 
1461 	/* Initialize the scheduler entity */
1462 	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
1463 				  &sched, 1, NULL);
1464 	if (r) {
1465 		dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
1466 		goto err;
1467 	}
1468 
1469 	/*
1470 	 * Use some unique dummy value as the owner to make sure we execute
1471 	 * the cleaner shader on each submission. The value just needs to change
1472 	 * for each submission and is otherwise meaningless.
1473 	 */
1474 	owner = (void *)(unsigned long)atomic_inc_return(&counter);
1475 
1476 	r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
1477 				     64, 0, &job);
1478 	if (r)
1479 		goto err;
1480 
1481 	job->enforce_isolation = true;
1482 	/* always run the cleaner shader */
1483 	job->run_cleaner_shader = true;
1484 
1485 	ib = &job->ibs[0];
1486 	for (i = 0; i <= ring->funcs->align_mask; ++i)
1487 		ib->ptr[i] = ring->funcs->nop;
1488 	ib->length_dw = ring->funcs->align_mask + 1;
1489 
1490 	f = amdgpu_job_submit(job);
1491 
1492 	r = dma_fence_wait(f, false);
1493 	if (r)
1494 		goto err;
1495 
1496 	dma_fence_put(f);
1497 
1498 	/* Clean up the scheduler entity */
1499 	drm_sched_entity_destroy(&entity);
1500 	return 0;
1501 
1502 err:
1503 	return r;
1504 }
1505 
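/*
 * Run the cleaner shader once on every XCC that belongs to the given
 * partition, using the first ready compute ring found on each XCC.
 */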
1506 static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
1507 {
1508 	int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1509 	struct amdgpu_ring *ring;
1510 	int num_xcc_to_clear;
1511 	int i, r, xcc_id;
1512 
1513 	if (adev->gfx.num_xcc_per_xcp)
1514 		num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
1515 	else
1516 		num_xcc_to_clear = 1;
1517 
1518 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1519 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1520 			ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1521 			if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
1522 				r = amdgpu_gfx_run_cleaner_shader_job(ring);
1523 				if (r)
1524 					return r;
1525 				num_xcc_to_clear--;
1526 				break;
1527 			}
1528 		}
1529 	}
1530 
1531 	if (num_xcc_to_clear)
1532 		return -ENOENT;
1533 
1534 	return 0;
1535 }
1536 
1537 /**
1538  * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
1539  * @dev: The device structure
1540  * @attr: The device attribute structure
1541  * @buf: The buffer containing the input data
1542  * @count: The size of the input data
1543  *
1544  * Provides the sysfs interface to manually run a cleaner shader, which is
1545  * used to clear the GPU state between different tasks. Writing a value to the
1546  * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
1547  * The value written corresponds to the partition index on multi-partition
1548  * devices. On single-partition devices, the value should be '0'.
1549  *
1550  * The cleaner shader clears the Local Data Store (LDS) and General Purpose
1551  * Registers (GPRs) to ensure data isolation between GPU workloads.
1552  *
1553  * Return: The number of bytes written to the sysfs file.
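 * For example, writing '0' runs the cleaner shader on the first (or only)
 * partition, while writing '1' targets the second partition of a
 * multi-partition device.
 *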
1554  */
1555 static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
1556 						 struct device_attribute *attr,
1557 						 const char *buf,
1558 						 size_t count)
1559 {
1560 	struct drm_device *ddev = dev_get_drvdata(dev);
1561 	struct amdgpu_device *adev = drm_to_adev(ddev);
1562 	int ret;
1563 	long value;
1564 
1565 	if (amdgpu_in_reset(adev))
1566 		return -EPERM;
1567 	if (adev->in_suspend && !adev->in_runpm)
1568 		return -EPERM;
1569 
1570 	if (adev->gfx.disable_kq)
1571 		return -EPERM;
1572 
1573 	ret = kstrtol(buf, 0, &value);
1574 
1575 	if (ret)
1576 		return -EINVAL;
1577 
1578 	if (value < 0)
1579 		return -EINVAL;
1580 
1581 	if (adev->xcp_mgr) {
1582 		if (value >= adev->xcp_mgr->num_xcps)
1583 			return -EINVAL;
1584 	} else {
1585 		if (value > 1)
1586 			return -EINVAL;
1587 	}
1588 
1589 	ret = pm_runtime_get_sync(ddev->dev);
1590 	if (ret < 0) {
1591 		pm_runtime_put_autosuspend(ddev->dev);
1592 		return ret;
1593 	}
1594 
1595 	ret = amdgpu_gfx_run_cleaner_shader(adev, value);
1596 
1597 	pm_runtime_mark_last_busy(ddev->dev);
1598 	pm_runtime_put_autosuspend(ddev->dev);
1599 
1600 	if (ret)
1601 		return ret;
1602 
1603 	return count;
1604 }
1605 
1606 /**
1607  * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
1608  * @dev: The device structure
1609  * @attr: The device attribute structure
1610  * @buf: The buffer to store the output data
1611  *
1612  * Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
1613  * feature for each GPU partition. Reading from the 'enforce_isolation'
1614  * sysfs file returns the isolation settings for all partitions, where '0'
1615  * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy mode,
1616  * and '3' indicates enabled without the cleaner shader.
1617  *
1618  * Return: The number of bytes read from the sysfs file.
1619  */
1620 static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
1621 						struct device_attribute *attr,
1622 						char *buf)
1623 {
1624 	struct drm_device *ddev = dev_get_drvdata(dev);
1625 	struct amdgpu_device *adev = drm_to_adev(ddev);
1626 	int i;
1627 	ssize_t size = 0;
1628 
1629 	if (adev->xcp_mgr) {
1630 		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
1631 			size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
1632 			if (i < (adev->xcp_mgr->num_xcps - 1))
1633 				size += sysfs_emit_at(buf, size, " ");
1634 		}
1635 		buf[size++] = '\n';
1636 	} else {
1637 		size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
1638 	}
1639 
1640 	return size;
1641 }
1642 
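/*
 * Usage sketch (illustrative; path and partition count are assumptions):
 * reading the file returns one value per partition, space separated. A
 * four-partition device with isolation enabled only on the first partition
 * might show:
 *
 *   $ cat /sys/class/drm/card0/device/enforce_isolation
 *   1 0 0 0
 */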
1643 /**
1644  * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
1645  * @dev: The device structure
1646  * @attr: The device attribute structure
1647  * @buf: The buffer containing the input data
1648  * @count: The size of the input data
1649  *
1650  * This function allows control over the 'enforce_isolation' feature, which
1651  * serializes access to the graphics engine. Writing to the
1652  * 'enforce_isolation' sysfs file sets the isolation mode for each partition:
1653  * '0' disables isolation, '1' enables isolation with the cleaner shader,
1654  * '2' enables legacy isolation without the cleaner shader, and '3' enables
1655  * process isolation without submitting the cleaner shader. The input must
1656  * specify the setting for every partition.
1657  *
1658  * Return: The number of bytes written to the sysfs file.
1659  */
1660 static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
1661 						struct device_attribute *attr,
1662 						const char *buf, size_t count)
1663 {
1664 	struct drm_device *ddev = dev_get_drvdata(dev);
1665 	struct amdgpu_device *adev = drm_to_adev(ddev);
1666 	long partition_values[MAX_XCP] = {0};
1667 	int ret, i, num_partitions;
1668 	const char *input_buf = buf;
1669 
1670 	for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
1671 		ret = sscanf(input_buf, "%ld", &partition_values[i]);
1672 		if (ret <= 0)
1673 			break;
1674 
1675 		/* Move the pointer to the next value in the string */
1676 		input_buf = strchr(input_buf, ' ');
1677 		if (input_buf) {
1678 			input_buf++;
1679 		} else {
1680 			i++;
1681 			break;
1682 		}
1683 	}
1684 	num_partitions = i;
1685 
1686 	if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
1687 		return -EINVAL;
1688 
1689 	if (!adev->xcp_mgr && num_partitions != 1)
1690 		return -EINVAL;
1691 
1692 	for (i = 0; i < num_partitions; i++) {
1693 		if (partition_values[i] != 0 &&
1694 		    partition_values[i] != 1 &&
1695 		    partition_values[i] != 2 &&
1696 		    partition_values[i] != 3)
1697 			return -EINVAL;
1698 	}
1699 
1700 	mutex_lock(&adev->enforce_isolation_mutex);
1701 	for (i = 0; i < num_partitions; i++) {
1702 		switch (partition_values[i]) {
1703 		case 0:
1704 		default:
1705 			adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
1706 			break;
1707 		case 1:
1708 			adev->enforce_isolation[i] =
1709 				AMDGPU_ENFORCE_ISOLATION_ENABLE;
1710 			break;
1711 		case 2:
1712 			adev->enforce_isolation[i] =
1713 				AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
1714 			break;
1715 		case 3:
1716 			adev->enforce_isolation[i] =
1717 				AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
1718 			break;
1719 		}
1720 	}
1721 	mutex_unlock(&adev->enforce_isolation_mutex);
1722 
1723 	amdgpu_mes_update_enforce_isolation(adev);
1724 
1725 	return count;
1726 }
1727 
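/*
 * Usage sketch (illustrative; the sysfs path and partition count are
 * assumptions): the write path expects one value per partition, e.g. to
 * enable isolation with the cleaner shader on all partitions of a
 * four-partition device, or to disable it on a single-partition device:
 *
 *   echo "1 1 1 1" > /sys/class/drm/card0/device/enforce_isolation
 *   echo 0 > /sys/class/drm/card0/device/enforce_isolation
 *
 * Writes that do not supply a value for every partition return -EINVAL.
 */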
1728 static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
1729 						struct device_attribute *attr,
1730 						char *buf)
1731 {
1732 	struct drm_device *ddev = dev_get_drvdata(dev);
1733 	struct amdgpu_device *adev = drm_to_adev(ddev);
1734 
1735 	if (!adev)
1736 		return -ENODEV;
1737 
1738 	return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
1739 }
1740 
1741 static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
1742 						struct device_attribute *attr,
1743 						char *buf)
1744 {
1745 	struct drm_device *ddev = dev_get_drvdata(dev);
1746 	struct amdgpu_device *adev = drm_to_adev(ddev);
1747 
1748 	if (!adev)
1749 		return -ENODEV;
1750 
1751 	return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
1752 }
1753 
1754 static DEVICE_ATTR(run_cleaner_shader, 0200,
1755 		   NULL, amdgpu_gfx_set_run_cleaner_shader);
1756 
1757 static DEVICE_ATTR(enforce_isolation, 0644,
1758 		   amdgpu_gfx_get_enforce_isolation,
1759 		   amdgpu_gfx_set_enforce_isolation);
1760 
1761 static DEVICE_ATTR(current_compute_partition, 0644,
1762 		   amdgpu_gfx_get_current_compute_partition,
1763 		   amdgpu_gfx_set_compute_partition);
1764 
1765 static DEVICE_ATTR(available_compute_partition, 0444,
1766 		   amdgpu_gfx_get_available_compute_partition, NULL);
1767 static DEVICE_ATTR(gfx_reset_mask, 0444,
1768 		   amdgpu_gfx_get_gfx_reset_mask, NULL);
1769 
1770 static DEVICE_ATTR(compute_reset_mask, 0444,
1771 		   amdgpu_gfx_get_compute_reset_mask, NULL);
1772 
1773 static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
1774 {
1775 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1776 	bool xcp_switch_supported;
1777 	int r;
1778 
1779 	if (!xcp_mgr)
1780 		return 0;
1781 
1782 	xcp_switch_supported =
1783 		(xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1784 
1785 	if (!xcp_switch_supported)
1786 		dev_attr_current_compute_partition.attr.mode &=
1787 			~(S_IWUSR | S_IWGRP | S_IWOTH);
1788 
1789 	r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
1790 	if (r)
1791 		return r;
1792 
1793 	if (xcp_switch_supported)
1794 		r = device_create_file(adev->dev,
1795 				       &dev_attr_available_compute_partition);
1796 
1797 	return r;
1798 }
1799 
1800 static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
1801 {
1802 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
1803 	bool xcp_switch_supported;
1804 
1805 	if (!xcp_mgr)
1806 		return;
1807 
1808 	xcp_switch_supported =
1809 		(xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
1810 	device_remove_file(adev->dev, &dev_attr_current_compute_partition);
1811 
1812 	if (xcp_switch_supported)
1813 		device_remove_file(adev->dev,
1814 				   &dev_attr_available_compute_partition);
1815 }
1816 
1817 static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
1818 {
1819 	int r;
1820 
1821 	r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
1822 	if (r)
1823 		return r;
1824 	if (adev->gfx.enable_cleaner_shader)
1825 		r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
1826 
1827 	return r;
1828 }
1829 
1830 static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
1831 {
1832 	device_remove_file(adev->dev, &dev_attr_enforce_isolation);
1833 	if (adev->gfx.enable_cleaner_shader)
1834 		device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
1835 }
1836 
1837 static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
1838 {
1839 	int r = 0;
1840 
1841 	if (!amdgpu_gpu_recovery)
1842 		return r;
1843 
1844 	if (adev->gfx.num_gfx_rings) {
1845 		r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
1846 		if (r)
1847 			return r;
1848 	}
1849 
1850 	if (adev->gfx.num_compute_rings) {
1851 		r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
1852 		if (r)
1853 			return r;
1854 	}
1855 
1856 	return r;
1857 }
1858 
1859 static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
1860 {
1861 	if (!amdgpu_gpu_recovery)
1862 		return;
1863 
1864 	if (adev->gfx.num_gfx_rings)
1865 		device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
1866 
1867 	if (adev->gfx.num_compute_rings)
1868 		device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
1869 }
1870 
1871 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
1872 {
1873 	int r;
1874 
1875 	r = amdgpu_gfx_sysfs_xcp_init(adev);
1876 	if (r) {
1877 		dev_err(adev->dev, "failed to create xcp sysfs files\n");
1878 		return r;
1879 	}
1880 
1881 	r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
1882 	if (r)
1883 		dev_err(adev->dev, "failed to create isolation sysfs files\n");
1884 
1885 	r = amdgpu_gfx_sysfs_reset_mask_init(adev);
1886 	if (r)
1887 		dev_err(adev->dev, "failed to create reset mask sysfs files\n");
1888 
1889 	return r;
1890 }
1891 
1892 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
1893 {
1894 	if (adev->dev->kobj.sd) {
1895 		amdgpu_gfx_sysfs_xcp_fini(adev);
1896 		amdgpu_gfx_sysfs_isolation_shader_fini(adev);
1897 		amdgpu_gfx_sysfs_reset_mask_fini(adev);
1898 	}
1899 }
1900 
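/**
 * amdgpu_gfx_cleaner_shader_sw_init - Allocate the cleaner shader buffer
 * @adev: amdgpu_device pointer
 * @cleaner_shader_size: size of the cleaner shader binary in bytes
 *
 * Allocates and maps a kernel buffer object large enough to hold the cleaner
 * shader, storing the object, its GPU address and its CPU pointer in @adev.
 *
 * Return: 0 on success, -EOPNOTSUPP if the cleaner shader is disabled, or a
 * negative error code from the buffer allocation.
 */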
1901 int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
1902 				      unsigned int cleaner_shader_size)
1903 {
1904 	if (!adev->gfx.enable_cleaner_shader)
1905 		return -EOPNOTSUPP;
1906 
1907 	return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
1908 				       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
1909 				       &adev->gfx.cleaner_shader_obj,
1910 				       &adev->gfx.cleaner_shader_gpu_addr,
1911 				       (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1912 }
1913 
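/**
 * amdgpu_gfx_cleaner_shader_sw_fini - Free the cleaner shader buffer
 * @adev: amdgpu_device pointer
 *
 * Frees the buffer object allocated by amdgpu_gfx_cleaner_shader_sw_init().
 */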
1914 void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
1915 {
1916 	if (!adev->gfx.enable_cleaner_shader)
1917 		return;
1918 
1919 	amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
1920 			      &adev->gfx.cleaner_shader_gpu_addr,
1921 			      (void **)&adev->gfx.cleaner_shader_cpu_ptr);
1922 }
1923 
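/**
 * amdgpu_gfx_cleaner_shader_init - Upload the cleaner shader binary
 * @adev: amdgpu_device pointer
 * @cleaner_shader_size: size of the cleaner shader binary in bytes
 * @cleaner_shader_ptr: pointer to the cleaner shader binary
 *
 * Copies the cleaner shader binary into the buffer allocated by
 * amdgpu_gfx_cleaner_shader_sw_init() so the GFX IP can execute it when
 * clearing state between workloads.
 */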
1924 void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
1925 				    unsigned int cleaner_shader_size,
1926 				    const void *cleaner_shader_ptr)
1927 {
1928 	if (!adev->gfx.enable_cleaner_shader)
1929 		return;
1930 
1931 	if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
1932 		memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
1933 			    cleaner_shader_size);
1934 }
1935 
1936 /**
1937  * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
1938  * @adev: amdgpu_device pointer
1939  * @idx: Index of the scheduler to control
1940  * @enable: Whether to enable or disable the KFD scheduler
1941  *
1942  * This function is used to control the KFD (Kernel Fusion Driver) scheduler
1943  * from the KGD. It is part of the cleaner shader feature. This function plays
1944  * a key role in enforcing process isolation on the GPU.
1945  *
1946  * The function uses a reference count mechanism (userq_sch_req_count) to keep
1947  * track of the number of requests to enable the KFD scheduler. When a request
1948  * to enable the KFD scheduler is made, the reference count is decremented.
1949  * When the reference count reaches zero, a delayed work is scheduled to
1950  * enforce isolation once the remaining time slice (at most GFX_SLICE_PERIOD_MS) expires.
1951  *
1952  * When a request to disable the KFD scheduler is made, the function first
1953  * checks if the reference count is zero. If it is, it cancels the delayed work
1954  * for enforcing isolation and checks if the KFD scheduler is active. If the
1955  * KFD scheduler is active, it sends a request to stop the KFD scheduler and
1956  * sets the KFD scheduler state to inactive. Then, it increments the reference
1957  * count.
1958  *
1959  * The function is synchronized using the userq_sch_mutex to ensure that the KFD
1960  * scheduler state and reference count are updated atomically.
1961  *
1962  * Note: If the reference count is already zero when a request to enable the
1963  * KFD scheduler is made, it means there's an imbalance bug somewhere. The
1964  * function triggers a warning in this case.
1965  */
1966 static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
1967 				    bool enable)
1968 {
1969 	mutex_lock(&adev->gfx.userq_sch_mutex);
1970 
1971 	if (enable) {
1972 		/* If the count is already 0, it means there's an imbalance bug somewhere.
1973 		 * Note that the bug may be in a different caller than the one which triggers the
1974 		 * WARN_ON_ONCE.
1975 		 */
1976 		if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
1977 			dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
1978 			goto unlock;
1979 		}
1980 
1981 		adev->gfx.userq_sch_req_count[idx]--;
1982 
1983 		if (adev->gfx.userq_sch_req_count[idx] == 0 &&
1984 		    adev->gfx.userq_sch_inactive[idx]) {
1985 			schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
1986 					      msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
1987 		}
1988 	} else {
1989 		if (adev->gfx.userq_sch_req_count[idx] == 0) {
1990 			cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
1991 			if (!adev->gfx.userq_sch_inactive[idx]) {
1992 				amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
1993 				if (adev->kfd.init_complete)
1994 					amdgpu_amdkfd_stop_sched(adev, idx);
1995 				adev->gfx.userq_sch_inactive[idx] = true;
1996 			}
1997 		}
1998 
1999 		adev->gfx.userq_sch_req_count[idx]++;
2000 	}
2001 
2002 unlock:
2003 	mutex_unlock(&adev->gfx.userq_sch_mutex);
2004 }
2005 
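/*
 * Pairing sketch (illustrative): callers are expected to balance the two
 * directions so that userq_sch_req_count stays consistent, e.g.:
 *
 *   amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);  // stop KFD/user queues, count++
 *   ... submit and fence kernel work for this partition ...
 *   amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);   // allow KFD again, count--
 *
 * In this file the pairing is provided by the ring begin_use/end_use helpers
 * further below.
 */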
2006 /**
2007  * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
2008  *
2009  * @work: work_struct.
2010  *
2011  * This function is the work handler for enforcing shader isolation on AMD GPUs.
2012  * It counts the number of emitted fences for each GFX and compute ring. If there
2013  * are any fences outstanding, it reschedules itself after a short (1 ms) delay
2014  * to let the kernel work drain. If there are no fences, it signals the Kernel Fusion
2015  * Driver (KFD) to resume the runqueue. The function is synchronized using the
2016  * `enforce_isolation_mutex`.
2017  */
2018 void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
2019 {
2020 	struct amdgpu_isolation_work *isolation_work =
2021 		container_of(work, struct amdgpu_isolation_work, work.work);
2022 	struct amdgpu_device *adev = isolation_work->adev;
2023 	u32 i, idx, fences = 0;
2024 
2025 	if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
2026 		idx = 0;
2027 	else
2028 		idx = isolation_work->xcp_id;
2029 
2030 	if (idx >= MAX_XCP)
2031 		return;
2032 
2033 	mutex_lock(&adev->enforce_isolation_mutex);
2034 	for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
2035 		if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
2036 			fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
2037 	}
2038 	for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
2039 		if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
2040 			fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
2041 	}
2042 	if (fences) {
2043 		/* we've already had our timeslice, so let's wrap this up */
2044 		schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
2045 				      msecs_to_jiffies(1));
2046 	} else {
2047 		/* Tell KFD to resume the runqueue */
2048 		WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
2049 		WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
2050 
2051 		amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
2052 		if (adev->kfd.init_complete)
2053 			amdgpu_amdkfd_start_sched(adev, idx);
2054 		adev->gfx.userq_sch_inactive[idx] = false;
2055 	}
2056 	mutex_unlock(&adev->enforce_isolation_mutex);
2057 }
2058 
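/*
 * Wiring sketch (illustrative; the exact init location is an assumption, not
 * shown in this file): each partition owns one amdgpu_isolation_work whose
 * delayed work must point at this handler, roughly:
 *
 *   adev->gfx.enforce_isolation[i].adev = adev;
 *   adev->gfx.enforce_isolation[i].xcp_id = i;
 *   INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
 *                     amdgpu_gfx_enforce_isolation_handler);
 */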
2059 /**
2060  * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
2061  * @adev: amdgpu_device pointer
2062  * @idx: Index of the GPU partition
2063  *
2064  * When kernel submissions come in, the jobs are given a time slice and once
2065  * that time slice is up, if there are KFD user queues active, kernel
2066  * submissions are blocked until KFD has had its time slice. Once the KFD time
2067  * slice is up, KFD user queues are preempted and kernel submissions are
2068  * unblocked and allowed to run again.
2069  */
2070 static void
2071 amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
2072 					  u32 idx)
2073 {
2074 	unsigned long cjiffies;
2075 	bool wait = false;
2076 
2077 	mutex_lock(&adev->enforce_isolation_mutex);
2078 	if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2079 		/* set the initial values if nothing is set */
2080 		if (!adev->gfx.enforce_isolation_jiffies[idx]) {
2081 			adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2082 			adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2083 		}
2084 		/* Make sure KFD gets a chance to run */
2085 		if (amdgpu_amdkfd_compute_active(adev, idx)) {
2086 			cjiffies = jiffies;
2087 			if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
2088 				cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
2089 				if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) {
2090 					/* if our time is up, let KGD work drain before scheduling more */
2091 					wait = true;
2092 					/* reset the timer period */
2093 					adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2094 				} else {
2095 					/* set the timer period to what's left in our time slice */
2096 					adev->gfx.enforce_isolation_time[idx] =
2097 						GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
2098 				}
2099 			} else {
2100 				/* if jiffies wrap around we will just wait a little longer */
2101 				adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2102 			}
2103 		} else {
2104 			/* if there is no KFD work, then set the full slice period */
2105 			adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
2106 			adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
2107 		}
2108 	}
2109 	mutex_unlock(&adev->enforce_isolation_mutex);
2110 
2111 	if (wait)
2112 		msleep(GFX_SLICE_PERIOD_MS);
2113 }
2114 
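/*
 * Worked example (numbers assumed for illustration): if GFX_SLICE_PERIOD_MS
 * were 10 and 4 ms had elapsed since enforce_isolation_jiffies[idx] was
 * stamped, the remaining kernel slice stored in enforce_isolation_time[idx]
 * would be 10 - 4 = 6 ms. Once the full 10 ms have elapsed, the caller
 * sleeps for a whole GFX_SLICE_PERIOD_MS so the KFD user queues get their
 * turn before more kernel work is submitted.
 */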
2115 /**
2116  * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
2117  * @ring: Pointer to the amdgpu_ring structure
2118  *
2119  * Ring begin_use helper implementation for gfx which serializes access to the
2120  * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2121  * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2122  * each get a time slice when both are active.
2123  */
2124 void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
2125 {
2126 	struct amdgpu_device *adev = ring->adev;
2127 	u32 idx;
2128 	bool sched_work = false;
2129 
2130 	if (!adev->gfx.enable_cleaner_shader)
2131 		return;
2132 
2133 	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2134 		idx = 0;
2135 	else
2136 		idx = ring->xcp_id;
2137 
2138 	if (idx >= MAX_XCP)
2139 		return;
2140 
2141 	/* Don't submit more work until KFD has had some time */
2142 	amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
2143 
2144 	mutex_lock(&adev->enforce_isolation_mutex);
2145 	if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2146 		if (adev->kfd.init_complete)
2147 			sched_work = true;
2148 	}
2149 	mutex_unlock(&adev->enforce_isolation_mutex);
2150 
2151 	if (sched_work)
2152 		amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
2153 }
2154 
2155 /**
2156  * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
2157  * @ring: Pointer to the amdgpu_ring structure
2158  *
2159  * Ring end_use helper implementation for gfx which serializes access to the
2160  * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
2161  * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
2162  * each get a time slice when both are active.
2163  */
2164 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
2165 {
2166 	struct amdgpu_device *adev = ring->adev;
2167 	u32 idx;
2168 	bool sched_work = false;
2169 
2170 	if (!adev->gfx.enable_cleaner_shader)
2171 		return;
2172 
2173 	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
2174 		idx = 0;
2175 	else
2176 		idx = ring->xcp_id;
2177 
2178 	if (idx >= MAX_XCP)
2179 		return;
2180 
2181 	mutex_lock(&adev->enforce_isolation_mutex);
2182 	if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
2183 		if (adev->kfd.init_complete)
2184 			sched_work = true;
2185 	}
2186 	mutex_unlock(&adev->enforce_isolation_mutex);
2187 
2188 	if (sched_work)
2189 		amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
2190 }
2191 
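/*
 * Wiring sketch (illustrative; gfx_vN_0 is a placeholder, the real
 * assignments live in the per-IP gfx_v*.c files): IP blocks that support
 * enforced isolation hook these helpers into their ring callbacks, e.g.:
 *
 *   static const struct amdgpu_ring_funcs gfx_vN_0_ring_funcs_gfx = {
 *       ...
 *       .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
 *       .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
 *   };
 */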
2192 void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work)
2193 {
2194 	struct amdgpu_device *adev =
2195 		container_of(work, struct amdgpu_device, gfx.idle_work.work);
2196 	enum PP_SMC_POWER_PROFILE profile;
2197 	u32 i, fences = 0;
2198 	int r;
2199 
2200 	if (adev->gfx.num_gfx_rings)
2201 		profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
2202 	else
2203 		profile = PP_SMC_POWER_PROFILE_COMPUTE;
2204 
2205 	for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i)
2206 		fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
2207 	for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i)
2208 		fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
2209 	if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) {
2210 		mutex_lock(&adev->gfx.workload_profile_mutex);
2211 		if (adev->gfx.workload_profile_active) {
2212 			r = amdgpu_dpm_switch_power_profile(adev, profile, false);
2213 			if (r)
2214 				dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
2215 					 profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
2216 					 "fullscreen 3D" : "compute");
2217 			adev->gfx.workload_profile_active = false;
2218 		}
2219 		mutex_unlock(&adev->gfx.workload_profile_mutex);
2220 	} else {
2221 		schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
2222 	}
2223 }
2224 
2225 void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
2226 {
2227 	struct amdgpu_device *adev = ring->adev;
2228 	enum PP_SMC_POWER_PROFILE profile;
2229 	int r;
2230 
2231 	if (amdgpu_dpm_is_overdrive_enabled(adev))
2232 		return;
2233 
2234 	if (adev->gfx.num_gfx_rings)
2235 		profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
2236 	else
2237 		profile = PP_SMC_POWER_PROFILE_COMPUTE;
2238 
2239 	atomic_inc(&adev->gfx.total_submission_cnt);
2240 
2241 	cancel_delayed_work_sync(&adev->gfx.idle_work);
2242 
2243 	/* We can safely return early here because we've cancelled the
2244 	 * delayed work so there is no one else to set it to false
2245 	 * and we don't care if someone else sets it to true.
2246 	 */
2247 	if (adev->gfx.workload_profile_active)
2248 		return;
2249 
2250 	mutex_lock(&adev->gfx.workload_profile_mutex);
2251 	if (!adev->gfx.workload_profile_active) {
2252 		r = amdgpu_dpm_switch_power_profile(adev, profile, true);
2253 		if (r)
2254 			dev_warn(adev->dev, "(%d) failed to enable %s power profile mode\n", r,
2255 				 profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
2256 				 "fullscreen 3D" : "compute");
2257 		adev->gfx.workload_profile_active = true;
2258 	}
2259 	mutex_unlock(&adev->gfx.workload_profile_mutex);
2260 }
2261 
2262 void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
2263 {
2264 	struct amdgpu_device *adev = ring->adev;
2265 
2266 	if (amdgpu_dpm_is_overdrive_enabled(adev))
2267 		return;
2268 
2269 	atomic_dec(&ring->adev->gfx.total_submission_cnt);
2270 
2271 	schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
2272 }
2273 
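/*
 * Note on the pattern above: begin_use bumps the submission counter and
 * switches the SMU to the fullscreen 3D or compute workload profile, while
 * end_use drops the counter and re-arms the idle work, which removes the
 * profile once no fences or submissions remain for GFX_PROFILE_IDLE_TIMEOUT.
 * The helpers are intended to be called from the per-IP ring begin_use and
 * end_use paths; the call sites themselves are outside this file.
 */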
2274 /**
2275  * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
2276  *
2277  * @buffer: Output buffer that receives the PACKET3 clear-state preamble start.
2278  *
2279  * Return:
2280  * The next write index into @buffer.
2281  */
2282 u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
2283 {
2284 	u32 count = 0;
2285 
2286 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2287 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2288 
2289 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2290 	buffer[count++] = cpu_to_le32(0x80000000);
2291 	buffer[count++] = cpu_to_le32(0x80000000);
2292 
2293 	return count;
2294 }
2295 
2296 /**
2297  * amdgpu_gfx_csb_data_parser - Parse CS data
2298  *
2299  * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
2300  * @buffer: Output buffer that receives the clear-state register packets.
2301  * @count: Index in @buffer at which to start writing.
2302  *
2303  * Return:
2304  * The next write index into @buffer.
2305  */
2306 u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
2307 {
2308 	const struct cs_section_def *sect = NULL;
2309 	const struct cs_extent_def *ext = NULL;
2310 	u32 i;
2311 
2312 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2313 		for (ext = sect->section; ext->extent != NULL; ++ext) {
2314 			if (sect->id == SECT_CONTEXT) {
2315 				buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2316 				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2317 
2318 				for (i = 0; i < ext->reg_count; i++)
2319 					buffer[count++] = cpu_to_le32(ext->extent[i]);
2320 			}
2321 		}
2322 	}
2323 
2324 	return count;
2325 }
2326 
2327 /**
2328  * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
2329  *
2330  * @buffer: Output buffer that receives the PACKET3 preamble end.
2331  * @count: Index in @buffer at which to write the preamble end.
2332  */
2333 void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
2334 {
2335 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2336 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
2337 
2338 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
2339 	buffer[count++] = cpu_to_le32(0);
2340 }
2341 
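/*
 * Composition sketch (illustrative; gfx_vN_0 is a placeholder name): a
 * per-IP get_csb_buffer() implementation typically chains the three CSB
 * helpers above, e.g.:
 *
 *   static void gfx_vN_0_get_csb_buffer(struct amdgpu_device *adev,
 *                                       volatile u32 *buffer)
 *   {
 *       u32 count;
 *
 *       if (adev->gfx.rlc.cs_data == NULL || buffer == NULL)
 *           return;
 *
 *       count = amdgpu_gfx_csb_preamble_start(buffer);
 *       count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
 *       amdgpu_gfx_csb_preamble_end(buffer, count);
 *   }
 *
 * Real callers may program additional registers between the parser and the
 * preamble end.
 */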
2342 /*
2343  * debugfs to enable/disable gfx job submission to specific gfx rings.
2344  */
2345 #if defined(CONFIG_DEBUG_FS)
2346 static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
2347 {
2348 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2349 	u32 i;
2350 	u64 mask = 0;
2351 	struct amdgpu_ring *ring;
2352 
2353 	if (!adev)
2354 		return -ENODEV;
2355 
2356 	mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
2357 	if ((val & mask) == 0)
2358 		return -EINVAL;
2359 
2360 	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2361 		ring = &adev->gfx.gfx_ring[i];
2362 		if (val & (1ULL << i))
2363 			ring->sched.ready = true;
2364 		else
2365 			ring->sched.ready = false;
2366 	}
2367 	/* publish sched.ready flag update effective immediately across smp */
2368 	smp_wmb();
2369 	return 0;
2370 }
2371 
2372 static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
2373 {
2374 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2375 	u32 i;
2376 	u64 mask = 0;
2377 	struct amdgpu_ring *ring;
2378 
2379 	if (!adev)
2380 		return -ENODEV;
2381 	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
2382 		ring = &adev->gfx.gfx_ring[i];
2383 		if (ring->sched.ready)
2384 			mask |= 1ULL << i;
2385 	}
2386 
2387 	*val = mask;
2388 	return 0;
2389 }
2390 
2391 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
2392 			 amdgpu_debugfs_gfx_sched_mask_get,
2393 			 amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
2394 
2395 #endif
2396 
2397 void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
2398 {
2399 #if defined(CONFIG_DEBUG_FS)
2400 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2401 	struct dentry *root = minor->debugfs_root;
2402 	char name[32];
2403 
2404 	if (adev->gfx.num_gfx_rings <= 1)
2405 		return;
2406 	sprintf(name, "amdgpu_gfx_sched_mask");
2407 	debugfs_create_file(name, 0600, root, adev,
2408 			    &amdgpu_debugfs_gfx_sched_mask_fops);
2409 #endif
2410 }
2411 
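/*
 * Usage sketch (illustrative; the "dri/0" instance is an assumption): when
 * more than one gfx ring exists, the scheduler mask can be inspected and
 * changed through debugfs, e.g.:
 *
 *   cat /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask
 *   echo 0x1 > /sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask
 *
 * The analogous amdgpu_compute_sched_mask file below behaves the same way
 * for compute rings.
 */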
2412 /*
2413  * debugfs to enable/disable compute job submission to specific compute rings.
2414  */
2415 #if defined(CONFIG_DEBUG_FS)
2416 static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
2417 {
2418 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2419 	u32 i;
2420 	u64 mask = 0;
2421 	struct amdgpu_ring *ring;
2422 
2423 	if (!adev)
2424 		return -ENODEV;
2425 
2426 	mask = (1ULL << adev->gfx.num_compute_rings) - 1;
2427 	if ((val & mask) == 0)
2428 		return -EINVAL;
2429 
2430 	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2431 		ring = &adev->gfx.compute_ring[i];
2432 		if (val & (1ULL << i))
2433 			ring->sched.ready = true;
2434 		else
2435 			ring->sched.ready = false;
2436 	}
2437 
2438 	/* publish sched.ready flag update effective immediately across smp */
2439 	smp_wmb();
2440 	return 0;
2441 }
2442 
2443 static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
2444 {
2445 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
2446 	u32 i;
2447 	u64 mask = 0;
2448 	struct amdgpu_ring *ring;
2449 
2450 	if (!adev)
2451 		return -ENODEV;
2452 	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
2453 		ring = &adev->gfx.compute_ring[i];
2454 		if (ring->sched.ready)
2455 			mask |= 1ULL << i;
2456 	}
2457 
2458 	*val = mask;
2459 	return 0;
2460 }
2461 
2462 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
2463 			 amdgpu_debugfs_compute_sched_mask_get,
2464 			 amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
2465 
2466 #endif
2467 
2468 void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
2469 {
2470 #if defined(CONFIG_DEBUG_FS)
2471 	struct drm_minor *minor = adev_to_drm(adev)->primary;
2472 	struct dentry *root = minor->debugfs_root;
2473 	char name[32];
2474 
2475 	if (adev->gfx.num_compute_rings <= 1)
2476 		return;
2477 	sprintf(name, "amdgpu_compute_sched_mask");
2478 	debugfs_create_file(name, 0600, root, adev,
2479 			    &amdgpu_debugfs_compute_sched_mask_fops);
2480 #endif
2481 }
2482