/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);

static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}

static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}
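
/*
 * Illustrative sketch, not part of the driver: driving a full two-phase
 * suspend across all partitions with the wrappers above. Real callers live
 * in the IP/KFD suspend paths; error handling is simplified here.
 */
static int __maybe_unused example_suspend_all_xcps(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int i, ret;

	for (i = 0; i < xcp_mgr->num_xcps; i++) {
		ret = amdgpu_xcp_prepare_suspend(xcp_mgr, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < xcp_mgr->num_xcps; i++) {
		ret = amdgpu_xcp_suspend(xcp_mgr, i);
		if (ret)
			return ret;
	}

	return 0;
}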

static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;

	xcp->valid = true;
}

int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* Needed to figure out the memory id of an xcp */
	xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				continue;
			else
				xcp_mgr->xcp[i].mem_id = mem_id;
		}
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);

	return 0;
}

static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					      int mode)
{
	int ret, curr_mode, num_xcps = 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	curr_mode = xcp_mgr->mode;
	/* State set to transient mode */
	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

	if (ret) {
		/* Failed, get whatever mode it's at now */
		if (xcp_mgr->funcs->query_partition_mode)
			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
		else
			xcp_mgr->mode = curr_mode;

		goto out;
	}
	amdgpu_xcp_sysfs_entries_update(xcp_mgr);
out:
	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
}

int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}

int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int mode;

	if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return xcp_mgr->mode;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return xcp_mgr->mode;

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_lock(&xcp_mgr->xcp_lock);
	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);

	/* First time query for VF, set the mode here */
	if (amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		xcp_mgr->mode = mode;

	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
		dev_WARN(
			xcp_mgr->adev->dev,
			"Cached partition mode %d does not match device mode %d",
			xcp_mgr->mode, mode);

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}
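
/*
 * Illustrative sketch, not part of the driver: querying the current mode.
 * Without AMDGPU_XCP_FL_LOCKED the helper takes xcp_lock itself; pass the
 * flag only when the caller already holds the lock (as the switch path
 * above does).
 */
static void __maybe_unused example_log_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode = amdgpu_xcp_query_partition_mode(xcp_mgr, AMDGPU_XCP_FL_NONE);

	dev_dbg(xcp_mgr->adev->dev, "current partition mode: %d", mode);
}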

static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct drm_device *ddev;
	int i, ret;

	ddev = adev_to_drm(adev);

	/* xcp #0 shares drm device setting with adev */
	adev->xcp_mgr->xcp->ddev = ddev;

	for (i = 1; i < MAX_XCP; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
		if (ret == -ENOSPC) {
			dev_warn(adev->dev,
			"Skipping xcp node #%d: out of drm node resources.", i);
			ret = 0;
			goto out;
		} else if (ret) {
			goto out;
		}

		/* Stash the partition node's original pointers, then redirect
		 * all IOCTLs to the primary device.
		 */
		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		p_ddev->driver = &amdgpu_partition_driver;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;

		dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
	}
	ret = 0;
out:
	amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);

	return ret;
}

int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;
	int i;

	if (!xcp_funcs || !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);

	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	adev->xcp_mgr = xcp_mgr;
	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}
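
/*
 * Illustrative sketch, not part of the driver: how an IP-specific backend
 * might register a partition manager. example_get_ip_details() and the
 * funcs table are hypothetical stand-ins; real tables live in files such
 * as aqua_vanjaram.c. Only get_ip_details is mandatory.
 */
static int example_get_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				  enum AMDGPU_XCP_IP_BLOCK ip_blk,
				  struct amdgpu_xcp_ip *ip)
{
	return -EOPNOTSUPP; /* hypothetical stub */
}

static struct amdgpu_xcp_mgr_funcs example_xcp_funcs = {
	.get_ip_details = example_get_ip_details,
};

static int __maybe_unused example_register_xcp_mgr(struct amdgpu_device *adev)
{
	/* Single partition (SPX) until a mode switch is requested */
	return amdgpu_xcp_mgr_init(adev, AMDGPU_SPX_PARTITION_MODE, 1,
				   &example_xcp_funcs);
}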

int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;

	return id_mask;
}
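
/*
 * Illustrative sketch, not part of the driver: decoding the bitmask
 * returned above. Bit N set means the queried instance is visible in
 * partition N; negative values are errnos.
 */
static void __maybe_unused example_find_sdma0_partitions(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int xcp_id, id_mask;

	id_mask = amdgpu_xcp_get_partition(xcp_mgr, AMDGPU_XCP_SDMA, 0);
	if (id_mask < 0)
		return;

	for_each_inst(xcp_id, id_mask)
		dev_dbg(xcp_mgr->adev->dev,
			"SDMA instance 0 belongs to partition %d", xcp_id);
}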

int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask)
{
	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
		return -EINVAL;

	*inst_mask = xcp->ip[ip].inst_mask;

	return 0;
}
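
/*
 * Illustrative sketch, not part of the driver: walking the GFX instances
 * owned by a single partition via the helper above.
 */
static void __maybe_unused example_walk_gfx_insts(struct amdgpu_xcp *xcp)
{
	uint32_t inst_mask;
	int inst;

	if (amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask))
		return;

	for_each_inst(inst, inst_mask)
		dev_dbg(xcp->xcp_mgr->adev->dev,
			"xcp %d owns GFX instance %d", xcp->id, inst);
}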

int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			const struct pci_device_id *ent)
{
	int i, ret;

	if (!adev->xcp_mgr)
		return 0;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
		if (ret)
			return ret;
	}

	return 0;
}

void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		p_ddev = adev->xcp_mgr->xcp[i].ddev;
		drm_dev_unplug(p_ddev);
		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
		p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
	}
}

int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv)
{
	int i;

	if (!adev->xcp_mgr)
		return 0;

	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
	for (i = 0; i < MAX_XCP; ++i) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			if (!adev->xcp_mgr->xcp[i].valid) {
				dev_err(adev->dev, "renderD%d partition %d not valid!",
						file_priv->minor->index, i);
				return -ENOENT;
			}
			dev_dbg(adev->dev, "renderD%d partition %d opened!",
					file_priv->minor->index, i);
			fpriv->xcp_id = i;
			break;
		}
	}

	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
	return 0;
}

void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
				  struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (drm_sched_wqueue_ready(sched)) {
		ring = to_amdgpu_ring(sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}

int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
			     u32 hw_ip, u32 hw_prio,
			     struct amdgpu_fpriv *fpriv,
			     unsigned int *num_scheds,
			     struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;
	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds =
			xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds =
			xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
	} else {
		dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
			      uint32_t inst_idx,
			      struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
	if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		break;
	default:
		dev_err(adev->dev, "Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			dev_dbg(adev->dev, "ring: %s xcp_id: %u", ring->name,
				ring->xcp_id);
			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
			break;
		}
	}
}

static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
					struct amdgpu_ring *ring,
					unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
		ring->name, sel_xcp_id, ring->funcs->type,
		ring->hw_prio, *num_gpu_sched);
}

static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX MODE in certain
		 * configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
			amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
			ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
		else
			amdgpu_set_xcp_id(adev, ring->me, ring);
	}

	return amdgpu_xcp_sched_list_update(adev);
}

void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	xcp_mgr->supp_xcp_modes = 0;

	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_QPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 6:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_TPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 4:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 2:
	case 1:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	default:
		break;
	}
}
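
/*
 * Illustrative sketch, not part of the driver: supp_xcp_modes is a bitmask
 * keyed by the partition-mode enum, so a requested mode can be validated
 * with a BIT() test before attempting a switch.
 */
static bool __maybe_unused example_mode_supported(struct amdgpu_xcp_mgr *xcp_mgr,
						  int mode)
{
	return !!(xcp_mgr->supp_xcp_modes & BIT(mode));
}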

int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}
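
/*
 * Illustrative sketch, not part of the driver: the expected ordering around
 * a partition switch when KFD is involved. Quiescing user queues (see the
 * TODO in amdgpu_xcp_pre_partition_switch()) is still the responsibility of
 * the IP-specific switch_partition_mode() implementation.
 */
static int __maybe_unused example_switch_with_kfd(struct amdgpu_xcp_mgr *xcp_mgr,
						  int new_mode)
{
	int ret;

	ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, AMDGPU_XCP_OPS_KFD);
	if (ret)
		return ret;

	ret = amdgpu_xcp_switch_partition_mode(xcp_mgr, new_mode);
	if (ret)
		return ret;

	return amdgpu_xcp_post_partition_switch(xcp_mgr, AMDGPU_XCP_OPS_KFD);
}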

/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                         \
	static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(        \
		struct amdgpu_xcp_res_details *xcp_res, char *buf) \
	{                                                          \
		return sysfs_emit(buf, "%d\n", xcp_res->_name);    \
	}

struct amdgpu_xcp_res_sysfs_attribute {
	struct attribute attr;
	ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf);
};

#define XCP_CFG_SYSFS_RES_ATTR(_name)                                        \
	struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \
		.attr = { .name = __stringify(_name), .mode = 0400 },        \
		.show = amdgpu_xcp_res_sysfs_##_name##_show,                 \
	}

XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst)
XCP_CFG_SYSFS_RES_ATTR(num_inst);
XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared)
XCP_CFG_SYSFS_RES_ATTR(num_shared);
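
/*
 * For reference, XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst) above expands to
 * roughly the following show routine (sketch of the preprocessor output):
 *
 *	static ssize_t amdgpu_xcp_res_sysfs_num_inst_show(
 *		struct amdgpu_xcp_res_details *xcp_res, char *buf)
 *	{
 *		return sysfs_emit(buf, "%d\n", xcp_res->num_inst);
 *	}
 */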

#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr

static struct attribute *xcp_cfg_res_sysfs_attrs[] = {
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst),
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL
};

static const char *xcp_desc[] = {
	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
};

static const char *nps_desc[] = {
	[UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN",
	[AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
	[AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
	[AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
	[AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
	[AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
	[AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
};

ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);

#define to_xcp_attr(x) \
	container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr)
#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj)

static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj,
					   struct attribute *attr, char *buf)
{
	struct amdgpu_xcp_res_sysfs_attribute *attribute;
	struct amdgpu_xcp_res_details *xcp_res;

	attribute = to_xcp_attr(attr);
	xcp_res = to_xcp_res(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(xcp_res, buf);
}

static const struct sysfs_ops xcp_cfg_res_sysfs_ops = {
	.show = xcp_cfg_res_sysfs_attr_show,
};

static const struct kobj_type xcp_cfg_res_sysfs_ktype = {
	.sysfs_ops = &xcp_cfg_res_sysfs_ops,
	.default_groups = xcp_cfg_res_sysfs_groups,
};

const char *xcp_res_names[] = {
	[AMDGPU_XCP_RES_XCC] = "xcc",
	[AMDGPU_XCP_RES_DMA] = "dma",
	[AMDGPU_XCP_RES_DEC] = "dec",
	[AMDGPU_XCP_RES_JPEG] = "jpeg",
};

static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
				   int mode,
				   struct amdgpu_xcp_cfg *xcp_cfg)
{
	if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info)
		return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg);

	return -EOPNOTSUPP;
}

#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj)
static ssize_t supported_xcp_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr;
	int size = 0, mode;
	char *sep = "";

	if (!xcp_mgr || !xcp_mgr->supp_xcp_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t supported_nps_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int size = 0, mode;
	char *sep = "";

	if (!xcp_cfg || !xcp_cfg->compatible_nps_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_cfg->compatible_nps_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t xcp_config_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	return sysfs_emit(buf, "%s\n",
			  amdgpu_gfx_compute_mode_desc(xcp_cfg->mode));
}

static ssize_t xcp_config_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int mode, r;

	if (!strncasecmp("SPX", buf, strlen("SPX")))
		mode = AMDGPU_SPX_PARTITION_MODE;
	else if (!strncasecmp("DPX", buf, strlen("DPX")))
		mode = AMDGPU_DPX_PARTITION_MODE;
	else if (!strncasecmp("TPX", buf, strlen("TPX")))
		mode = AMDGPU_TPX_PARTITION_MODE;
	else if (!strncasecmp("QPX", buf, strlen("QPX")))
		mode = AMDGPU_QPX_PARTITION_MODE;
	else if (!strncasecmp("CPX", buf, strlen("CPX")))
		mode = AMDGPU_CPX_PARTITION_MODE;
	else
		return -EINVAL;

	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);

	if (r)
		return r;

	xcp_cfg->mode = mode;
	return size;
}
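
/*
 * Illustrative user-space usage of the node above (the parent kobject is
 * created below under the device as "compute_partition_config"):
 *
 *	cat  .../compute_partition_config/xcp_config
 *	echo DPX > .../compute_partition_config/xcp_config
 *
 * The store matches the names in xcp_desc[] case-insensitively and rejects
 * anything else with -EINVAL.
 */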

static struct kobj_attribute xcp_cfg_sysfs_mode =
	__ATTR_RW_MODE(xcp_config, 0644);

static void xcp_cfg_sysfs_release(struct kobject *kobj)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	kfree(xcp_cfg);
}

static const struct kobj_type xcp_cfg_sysfs_ktype = {
	.release = xcp_cfg_sysfs_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_attribute supp_part_sysfs_mode =
	__ATTR_RO(supported_xcp_configs);

static struct kobj_attribute supp_nps_sysfs_mode =
	__ATTR_RO(supported_nps_configs);

static const struct attribute *xcp_attrs[] = {
	&supp_part_sysfs_mode.attr,
	&xcp_cfg_sysfs_mode.attr,
	NULL,
};

static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i, r, j, rid, mode;

	if (!adev->xcp_mgr)
		return;

	xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
	if (!xcp_cfg)
		return;
	xcp_cfg->xcp_mgr = adev->xcp_mgr;

	r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype,
				 &adev->dev->kobj, "compute_partition_config");
	if (r)
		goto err1;

	r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs);
	if (r)
		goto err1;

	if (adev->gmc.supported_nps_modes != 0) {
		r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		if (r) {
			sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
			goto err1;
		}
	}

	mode = (xcp_cfg->xcp_mgr->mode ==
		AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
		       AMDGPU_SPX_PARTITION_MODE :
		       xcp_cfg->xcp_mgr->mode;
	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
	if (r) {
		sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
		goto err1;
	}

	xcp_cfg->mode = mode;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		rid = xcp_res->id;
		r = kobject_init_and_add(&xcp_res->kobj,
					 &xcp_cfg_res_sysfs_ktype,
					 &xcp_cfg->kobj, "%s",
					 xcp_res_names[rid]);
		if (r)
			goto err;
	}

	adev->xcp_mgr->xcp_cfg = xcp_cfg;
	return;
err:
	/* Unwind only the resource kobjects added so far */
	for (j = 0; j < i; j++) {
		xcp_res = &xcp_cfg->xcp_res[j];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
err1:
	kobject_put(&xcp_cfg->kobj);
}

static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i;

	if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
		return;

	xcp_cfg = adev->xcp_mgr->xcp_cfg;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
	kobject_put(&xcp_cfg->kobj);
}

/*====================== xcp sysfs - data entries ======================*/

#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)

static ssize_t xcp_metrics_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp *xcp = to_xcp(kobj);
	struct amdgpu_xcp_mgr *xcp_mgr;
	ssize_t size;

	xcp_mgr = xcp->xcp_mgr;
	size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
	if (size <= 0)
		return size;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
}

static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct amdgpu_xcp *xcp = to_xcp(kobj);

	if (!xcp || !xcp->valid)
		return 0;

	return attr->mode;
}

static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);

static struct attribute *amdgpu_xcp_attrs[] = {
	&xcp_sysfs_metrics.attr,
	NULL,
};

static const struct attribute_group amdgpu_xcp_attrs_group = {
	.attrs = amdgpu_xcp_attrs,
	.is_visible = amdgpu_xcp_attrs_is_visible
};

static const struct kobj_type xcp_sysfs_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
};

static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
{
	struct amdgpu_xcp *xcp;

	for (n--; n >= 0; n--) {
		xcp = &xcp_mgr->xcp[n];
		if (!xcp->ddev || !xcp->valid)
			continue;
		sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		kobject_put(&xcp->kobj);
	}
}

static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i, r;

	for (i = 0; i < MAX_XCP; i++) {
		xcp = &xcp_mgr->xcp[i];
		if (!xcp->ddev)
			break;
		r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
					 &xcp->ddev->dev->kobj, "xcp");
		if (r)
			goto out;

		r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		if (r)
			goto out;
	}

	return;
out:
	kobject_put(&xcp->kobj);
}

static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		xcp = &xcp_mgr->xcp[i];
		if (!xcp->ddev)
			continue;
		sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
	}
}

void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
{
	if (!adev->xcp_mgr)
		return;

	amdgpu_xcp_cfg_sysfs_init(adev);
}

void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
{
	if (!adev->xcp_mgr)
		return;
	amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
	amdgpu_xcp_cfg_sysfs_fini(adev);
}
1075