xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c (revision e9ef810dfee7a2227da9d423aecb0ced35faddbe)
1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "amdgpu.h"
24 #include "amdgpu_xcp.h"
25 #include "amdgpu_drv.h"
26 
27 #include <drm/drm_drv.h>
28 #include "../amdxcp/amdgpu_xcp_drv.h"
29 
30 static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
31 static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);
32 
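/*
 * Run a single IP block's callback for the requested partition state
 * transition (prepare_suspend/suspend/prepare_resume/resume). IP blocks
 * that are not valid or have no callbacks registered are silently skipped.
 */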
33 static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
34 			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
35 {
36 	int (*run_func)(void *handle, uint32_t inst_mask);
37 	int ret = 0;
38 
39 	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
40 		return 0;
41 
42 	run_func = NULL;
43 
44 	switch (xcp_state) {
45 	case AMDGPU_XCP_PREPARE_SUSPEND:
46 		run_func = xcp_ip->ip_funcs->prepare_suspend;
47 		break;
48 	case AMDGPU_XCP_SUSPEND:
49 		run_func = xcp_ip->ip_funcs->suspend;
50 		break;
51 	case AMDGPU_XCP_PREPARE_RESUME:
52 		run_func = xcp_ip->ip_funcs->prepare_resume;
53 		break;
54 	case AMDGPU_XCP_RESUME:
55 		run_func = xcp_ip->ip_funcs->resume;
56 		break;
57 	}
58 
59 	if (run_func)
60 		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);
61 
62 	return ret;
63 }
64 
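/*
 * Walk every IP block of the given partition and run the callback for the
 * requested state transition, stopping at the first failure.
 */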
65 static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
66 				     int state)
67 {
68 	struct amdgpu_xcp_ip *xcp_ip;
69 	struct amdgpu_xcp *xcp;
70 	int i, ret;
71 
72 	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
73 		return -EINVAL;
74 
75 	xcp = &xcp_mgr->xcp[xcp_id];
76 	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
77 		xcp_ip = &xcp->ip[i];
78 		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
79 		if (ret)
80 			break;
81 	}
82 
83 	return ret;
84 }
85 
86 int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
87 {
88 	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
89 					 AMDGPU_XCP_PREPARE_SUSPEND);
90 }
91 
92 int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
93 {
94 	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
95 }
96 
97 int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
98 {
99 	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
100 					 AMDGPU_XCP_PREPARE_RESUME);
101 }
102 
103 int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
104 {
105 	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
106 }
107 
108 static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
109 				   struct amdgpu_xcp_ip *ip)
110 {
111 	struct amdgpu_xcp *xcp;
112 
113 	if (!ip)
114 		return;
115 
116 	xcp = &xcp_mgr->xcp[xcp_id];
117 	xcp->ip[ip->ip_id] = *ip;
118 	xcp->ip[ip->ip_id].valid = true;
119 
120 	xcp->valid = true;
121 }
122 
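/*
 * Populate the per-partition IP block details and memory partition id for
 * num_xcps partitions in the given mode, then rebuild the scheduler lists so
 * that rings are associated with their partitions.
 */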
123 int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
124 {
125 	struct amdgpu_device *adev = xcp_mgr->adev;
126 	struct amdgpu_xcp_ip ip;
127 	uint8_t mem_id;
128 	int i, j, ret;
129 
130 	if (!num_xcps || num_xcps > MAX_XCP)
131 		return -EINVAL;
132 
133 	xcp_mgr->mode = mode;
134 
135 	for (i = 0; i < MAX_XCP; ++i)
136 		xcp_mgr->xcp[i].valid = false;
137 
138 	/* This is needed for figuring out memory id of xcp */
139 	xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;
140 
141 	for (i = 0; i < num_xcps; ++i) {
142 		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
143 			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
144 							     &ip);
145 			if (ret)
146 				continue;
147 
148 			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
149 		}
150 
151 		xcp_mgr->xcp[i].id = i;
152 
153 		if (xcp_mgr->funcs->get_xcp_mem_id) {
154 			ret = xcp_mgr->funcs->get_xcp_mem_id(
155 				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
156 			if (ret)
157 				continue;
158 			else
159 				xcp_mgr->xcp[i].mem_id = mem_id;
160 		}
161 	}
162 
163 	xcp_mgr->num_xcps = num_xcps;
164 	amdgpu_xcp_update_partition_sched_list(adev);
165 
166 	return 0;
167 }
168 
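/*
 * Perform the actual mode switch under xcp_lock. The manager is placed in
 * the transient AMDGPU_XCP_MODE_TRANS state while the IP-specific callback
 * runs; on failure the previous mode is restored (or re-queried from the
 * device), on success the per-partition sysfs entries are refreshed.
 */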
169 static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
170 					      int mode)
171 {
172 	int ret, curr_mode, num_xcps = 0;
173 
174 	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
175 		return 0;
176 
177 	mutex_lock(&xcp_mgr->xcp_lock);
178 
179 	curr_mode = xcp_mgr->mode;
180 	/* State set to transient mode */
181 	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;
182 
183 	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);
184 
185 	if (ret) {
186 		/* Failed, get whatever mode it's at now */
187 		if (xcp_mgr->funcs->query_partition_mode)
188 			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
189 				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
190 		else
191 			xcp_mgr->mode = curr_mode;
192 
193 		goto out;
194 	}
195 	amdgpu_xcp_sysfs_entries_update(xcp_mgr);
196 out:
197 	mutex_unlock(&xcp_mgr->xcp_lock);
198 
199 	return ret;
200 }
201 
202 int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
203 {
204 	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
205 		return -EINVAL;
206 
207 	if (xcp_mgr->mode == mode)
208 		return 0;
209 
210 	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
211 }
212 
213 int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
214 {
215 	if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
216 		return 0;
217 
218 	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
219 }
220 
221 static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
222 {
223 	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
224 		return true;
225 
226 	if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
227 	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
228 		return true;
229 
230 	if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
231 	    xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
232 		return true;
233 
234 	return false;
235 }
236 
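/*
 * Return the current compute partition mode. The cached value is used when
 * it is known to be valid; otherwise the device is queried, taking xcp_lock
 * unless the caller already holds it and passes AMDGPU_XCP_FL_LOCKED.
 */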
237 int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
238 {
239 	int mode;
240 
241 	if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
242 		return xcp_mgr->mode;
243 
244 	if (!(flags & AMDGPU_XCP_FL_LOCKED))
245 		mutex_lock(&xcp_mgr->xcp_lock);
246 	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
247 
248 	/* First time query for VF, set the mode here */
249 	if (amdgpu_sriov_vf(xcp_mgr->adev) &&
250 	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
251 		xcp_mgr->mode = mode;
252 
253 	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
254 		dev_WARN(
255 			xcp_mgr->adev->dev,
256 			"Cached partition mode %d not matching with device mode %d",
257 			xcp_mgr->mode, mode);
258 
259 	if (!(flags & AMDGPU_XCP_FL_LOCKED))
260 		mutex_unlock(&xcp_mgr->xcp_lock);
261 
262 	return mode;
263 }
264 
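/*
 * Allocate DRM device nodes for the secondary partitions. Partition 0 reuses
 * the primary drm_device of the adev; partitions 1..MAX_XCP-1 get their own
 * nodes whose render/primary minors are redirected to the primary device, so
 * all IOCTLs are handled there. Allocation stops early if the drm node space
 * is exhausted.
 */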
265 static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
266 {
267 	struct drm_device *p_ddev;
268 	struct drm_device *ddev;
269 	int i, ret;
270 
271 	ddev = adev_to_drm(adev);
272 
273 	/* xcp #0 shares drm device setting with adev */
274 	adev->xcp_mgr->xcp->ddev = ddev;
275 
276 	for (i = 1; i < MAX_XCP; i++) {
277 		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
278 		if (ret == -ENOSPC) {
279 			dev_warn(adev->dev,
280 			"Skip xcp node #%d when out of drm node resources.", i);
281 			ret = 0;
282 			goto out;
283 		} else if (ret) {
284 			goto out;
285 		}
286 
287 		/* Redirect all IOCTLs to the primary device */
288 		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
289 		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
290 		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
291 		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
292 		p_ddev->render->dev = ddev;
293 		p_ddev->primary->dev = ddev;
294 		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
295 		p_ddev->driver = &amdgpu_partition_driver;
296 		adev->xcp_mgr->xcp[i].ddev = p_ddev;
297 
298 		dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
299 	}
300 	ret = 0;
301 out:
302 	amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);
303 
304 	return ret;
305 }
306 
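/*
 * Allocate and set up the partition manager for the device, initialize the
 * partitions for the requested initial mode and allocate the per-partition
 * DRM device nodes.
 */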
307 int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
308 			int init_num_xcps,
309 			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
310 {
311 	struct amdgpu_xcp_mgr *xcp_mgr;
312 	int i;
313 
314 	if (!xcp_funcs || !xcp_funcs->get_ip_details)
315 		return -EINVAL;
316 
317 	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
318 
319 	if (!xcp_mgr)
320 		return -ENOMEM;
321 
322 	xcp_mgr->adev = adev;
323 	xcp_mgr->funcs = xcp_funcs;
324 	xcp_mgr->mode = init_mode;
325 	mutex_init(&xcp_mgr->xcp_lock);
326 
327 	if (init_mode != AMDGPU_XCP_MODE_NONE)
328 		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);
329 
330 	adev->xcp_mgr = xcp_mgr;
331 	for (i = 0; i < MAX_XCP; ++i)
332 		xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;
333 
334 	return amdgpu_xcp_dev_alloc(adev);
335 }
336 
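/*
 * Return a bitmask of partition ids that contain the given instance of the
 * given IP block, or -ENXIO if no partition matches.
 *
 * Illustrative usage (error handling trimmed, not taken from this file):
 *
 *	int xcp_mask = amdgpu_xcp_get_partition(xcp_mgr, AMDGPU_XCP_GFXHUB, 0);
 *
 *	if (xcp_mask > 0)
 *		first_xcp = ffs(xcp_mask) - 1;
 */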
337 int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
338 			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
339 {
340 	struct amdgpu_xcp *xcp;
341 	int i, id_mask = 0;
342 
343 	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
344 		return -EINVAL;
345 
346 	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
347 		xcp = &xcp_mgr->xcp[i];
348 		if ((xcp->valid) && (xcp->ip[ip].valid) &&
349 		    (xcp->ip[ip].inst_mask & BIT(instance)))
350 			id_mask |= BIT(i);
351 	}
352 
353 	if (!id_mask)
354 		id_mask = -ENXIO;
355 
356 	return id_mask;
357 }
358 
359 int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
360 				enum AMDGPU_XCP_IP_BLOCK ip,
361 				uint32_t *inst_mask)
362 {
363 	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
364 		return -EINVAL;
365 
366 	*inst_mask = xcp->ip[ip].inst_mask;
367 
368 	return 0;
369 }
370 
371 int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
372 			const struct pci_device_id *ent)
373 {
374 	int i, ret;
375 
376 	if (!adev->xcp_mgr)
377 		return 0;
378 
379 	for (i = 1; i < MAX_XCP; i++) {
380 		if (!adev->xcp_mgr->xcp[i].ddev)
381 			break;
382 
383 		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
384 		if (ret)
385 			return ret;
386 	}
387 
388 	return 0;
389 }
390 
391 void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
392 {
393 	struct drm_device *p_ddev;
394 	int i;
395 
396 	if (!adev->xcp_mgr)
397 		return;
398 
399 	for (i = 1; i < MAX_XCP; i++) {
400 		if (!adev->xcp_mgr->xcp[i].ddev)
401 			break;
402 
403 		p_ddev = adev->xcp_mgr->xcp[i].ddev;
404 		drm_dev_unplug(p_ddev);
405 		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
406 		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
407 		p_ddev->driver =  adev->xcp_mgr->xcp[i].driver;
408 		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
409 	}
410 }
411 
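/*
 * Map an opened render node to its partition: set fpriv->xcp_id from the DRM
 * minor that was opened (or AMDGPU_XCP_NO_PARTITION for the primary node)
 * and derive the memory partition id used for VM allocations.
 */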
412 int amdgpu_xcp_open_device(struct amdgpu_device *adev,
413 			   struct amdgpu_fpriv *fpriv,
414 			   struct drm_file *file_priv)
415 {
416 	int i;
417 
418 	if (!adev->xcp_mgr)
419 		return 0;
420 
421 	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
422 	for (i = 0; i < MAX_XCP; ++i) {
423 		if (!adev->xcp_mgr->xcp[i].ddev)
424 			break;
425 
426 		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
427 			if (adev->xcp_mgr->xcp[i].valid == FALSE) {
428 				dev_err(adev->dev, "renderD%d partition %d not valid!",
429 						file_priv->minor->index, i);
430 				return -ENOENT;
431 			}
432 			dev_dbg(adev->dev, "renderD%d partition %d opened!",
433 					file_priv->minor->index, i);
434 			fpriv->xcp_id = i;
435 			break;
436 		}
437 	}
438 
439 	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
440 				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
441 	return 0;
442 }
443 
444 void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
445 				  struct amdgpu_ctx_entity *entity)
446 {
447 	struct drm_gpu_scheduler *sched;
448 	struct amdgpu_ring *ring;
449 
450 	if (!adev->xcp_mgr)
451 		return;
452 
453 	sched = entity->entity.rq->sched;
454 	if (drm_sched_wqueue_ready(sched)) {
455 		ring = to_amdgpu_ring(entity->entity.rq->sched);
456 		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
457 	}
458 }
459 
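/*
 * Pick the scheduler list for a context. Files that are not bound to a
 * partition are assigned the partition with the lowest reference count; the
 * selected partition's ref count is bumped when its schedulers are handed
 * out.
 */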
460 int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
461 			     u32 hw_ip, u32 hw_prio,
462 			     struct amdgpu_fpriv *fpriv,
463 			     unsigned int *num_scheds,
464 			     struct drm_gpu_scheduler ***scheds)
465 {
466 	u32 sel_xcp_id;
467 	int i;
468 	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
469 
470 	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
471 		u32 least_ref_cnt = ~0;
472 
473 		fpriv->xcp_id = 0;
474 		for (i = 0; i < xcp_mgr->num_xcps; i++) {
475 			u32 total_ref_cnt;
476 
477 			total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
478 			if (total_ref_cnt < least_ref_cnt) {
479 				fpriv->xcp_id = i;
480 				least_ref_cnt = total_ref_cnt;
481 			}
482 		}
483 	}
484 	sel_xcp_id = fpriv->xcp_id;
485 
486 	if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
487 		*num_scheds =
488 			xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
489 		*scheds =
490 			xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
491 		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
492 		dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
493 	} else {
494 		dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
495 		return -ENOENT;
496 	}
497 
498 	return 0;
499 }
500 
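/*
 * Derive the owning partition of a ring from its IP block type and instance
 * mask. CPER rings, and devices that are not in a partition mode, keep
 * AMDGPU_XCP_NO_PARTITION.
 */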
501 static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
502 			      uint32_t inst_idx,
503 			      struct amdgpu_ring *ring)
504 {
505 	int xcp_id;
506 	enum AMDGPU_XCP_IP_BLOCK ip_blk;
507 	uint32_t inst_mask;
508 
509 	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
510 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
511 		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
512 	if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
513 	    (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
514 		return;
515 
516 	inst_mask = 1 << inst_idx;
517 
518 	switch (ring->funcs->type) {
519 	case AMDGPU_HW_IP_GFX:
520 	case AMDGPU_RING_TYPE_COMPUTE:
521 	case AMDGPU_RING_TYPE_KIQ:
522 		ip_blk = AMDGPU_XCP_GFX;
523 		break;
524 	case AMDGPU_RING_TYPE_SDMA:
525 		ip_blk = AMDGPU_XCP_SDMA;
526 		break;
527 	case AMDGPU_RING_TYPE_VCN_ENC:
528 	case AMDGPU_RING_TYPE_VCN_JPEG:
529 		ip_blk = AMDGPU_XCP_VCN;
530 		break;
531 	default:
532 		dev_err(adev->dev, "Unsupported ring type %d!", ring->funcs->type);
533 		return;
534 	}
535 
536 	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
537 		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
538 			ring->xcp_id = xcp_id;
539 			dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
540 				ring->xcp_id);
541 			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
542 				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
543 			break;
544 		}
545 	}
546 }
547 
548 static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
549 					struct amdgpu_ring *ring,
550 					unsigned int sel_xcp_id)
551 {
552 	unsigned int *num_gpu_sched;
553 
554 	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
555 			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
556 	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
557 			.sched[(*num_gpu_sched)++] = &ring->sched;
558 	dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
559 		ring->name, sel_xcp_id, ring->funcs->type,
560 		ring->hw_prio, *num_gpu_sched);
561 }
562 
563 static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
564 {
565 	struct amdgpu_ring *ring;
566 	int i;
567 
568 	for (i = 0; i < MAX_XCP; i++) {
569 		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
570 		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
571 	}
572 
573 	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
574 		return 0;
575 
576 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
577 		ring = adev->rings[i];
578 		if (!ring || !ring->sched.ready || ring->no_scheduler)
579 			continue;
580 
581 		amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
582 
583 		/* VCN may be shared by two partitions under CPX MODE in certain
584 		 * configs.
585 		 */
586 		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
587 		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
588 		    (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
589 			amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
590 	}
591 
592 	return 0;
593 }
594 
595 int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
596 {
597 	int i;
598 
599 	for (i = 0; i < adev->num_rings; i++) {
600 		struct amdgpu_ring *ring = adev->rings[i];
601 
602 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
603 			ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
604 			amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
605 		else
606 			amdgpu_set_xcp_id(adev, ring->me, ring);
607 	}
608 
609 	return amdgpu_xcp_sched_list_update(adev);
610 }
611 
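/*
 * Recompute the supported compute partition modes from the number of
 * physical XCCs: e.g. 8 XCCs allow SPX/DPX/QPX/CPX, 6 allow SPX/TPX/CPX.
 */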
612 void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
613 {
614 	struct amdgpu_device *adev = xcp_mgr->adev;
615 
616 	xcp_mgr->supp_xcp_modes = 0;
617 
618 	switch (NUM_XCC(adev->gfx.xcc_mask)) {
619 	case 8:
620 		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
621 					  BIT(AMDGPU_DPX_PARTITION_MODE) |
622 					  BIT(AMDGPU_QPX_PARTITION_MODE) |
623 					  BIT(AMDGPU_CPX_PARTITION_MODE);
624 		break;
625 	case 6:
626 		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
627 					  BIT(AMDGPU_TPX_PARTITION_MODE) |
628 					  BIT(AMDGPU_CPX_PARTITION_MODE);
629 		break;
630 	case 4:
631 		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
632 					  BIT(AMDGPU_DPX_PARTITION_MODE) |
633 					  BIT(AMDGPU_CPX_PARTITION_MODE);
634 		break;
635 	case 2:
636 		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
637 					  BIT(AMDGPU_CPX_PARTITION_MODE);
638 		break;
639 	case 1:
640 		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
641 					  BIT(AMDGPU_CPX_PARTITION_MODE);
642 		break;
643 
644 	default:
645 		break;
646 	}
647 }
648 
649 int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
650 {
651 	/* TODO:
652 	 * Stop user queues and threads, and make sure GPU is empty of work.
653 	 */
654 
655 	if (flags & AMDGPU_XCP_OPS_KFD)
656 		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
657 
658 	return 0;
659 }
660 
661 int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
662 {
663 	int ret = 0;
664 
665 	if (flags & AMDGPU_XCP_OPS_KFD) {
666 		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
667 		amdgpu_amdkfd_device_init(xcp_mgr->adev);
668 		/* If KFD init failed, return failure */
669 		if (!xcp_mgr->adev->kfd.init_complete)
670 			ret = -EIO;
671 	}
672 
673 	return ret;
674 }
675 
676 /*====================== xcp sysfs - configuration ======================*/
677 #define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                         \
678 	static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(        \
679 		struct amdgpu_xcp_res_details *xcp_res, char *buf) \
680 	{                                                          \
681 		return sysfs_emit(buf, "%d\n", xcp_res->_name);    \
682 	}
683 
684 struct amdgpu_xcp_res_sysfs_attribute {
685 	struct attribute attr;
686 	ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf);
687 };
688 
689 #define XCP_CFG_SYSFS_RES_ATTR(_name)                                        \
690 	struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \
691 		.attr = { .name = __stringify(_name), .mode = 0400 },        \
692 		.show = amdgpu_xcp_res_sysfs_##_name##_show,                 \
693 	}
694 
695 XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst)
696 XCP_CFG_SYSFS_RES_ATTR(num_inst);
697 XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared)
698 XCP_CFG_SYSFS_RES_ATTR(num_shared);
699 
700 #define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr
701 
702 static struct attribute *xcp_cfg_res_sysfs_attrs[] = {
703 	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst),
704 	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL
705 };
706 
707 static const char *xcp_desc[] = {
708 	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
709 	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
710 	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
711 	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
712 	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
713 };
714 
715 static const char *nps_desc[] = {
716 	[UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN",
717 	[AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
718 	[AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
719 	[AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
720 	[AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
721 	[AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
722 	[AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
723 };
724 
725 ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);
726 
727 #define to_xcp_attr(x) \
728 	container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr)
729 #define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj)
730 
731 static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj,
732 					   struct attribute *attr, char *buf)
733 {
734 	struct amdgpu_xcp_res_sysfs_attribute *attribute;
735 	struct amdgpu_xcp_res_details *xcp_res;
736 
737 	attribute = to_xcp_attr(attr);
738 	xcp_res = to_xcp_res(kobj);
739 
740 	if (!attribute->show)
741 		return -EIO;
742 
743 	return attribute->show(xcp_res, buf);
744 }
745 
746 static const struct sysfs_ops xcp_cfg_res_sysfs_ops = {
747 	.show = xcp_cfg_res_sysfs_attr_show,
748 };
749 
750 static const struct kobj_type xcp_cfg_res_sysfs_ktype = {
751 	.sysfs_ops = &xcp_cfg_res_sysfs_ops,
752 	.default_groups = xcp_cfg_res_sysfs_groups,
753 };
754 
755 const char *xcp_res_names[] = {
756 	[AMDGPU_XCP_RES_XCC] = "xcc",
757 	[AMDGPU_XCP_RES_DMA] = "dma",
758 	[AMDGPU_XCP_RES_DEC] = "dec",
759 	[AMDGPU_XCP_RES_JPEG] = "jpeg",
760 };
761 
762 static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
763 				   int mode,
764 				   struct amdgpu_xcp_cfg *xcp_cfg)
765 {
766 	if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info)
767 		return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg);
768 
769 	return -EOPNOTSUPP;
770 }
771 
772 #define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj)
773 static ssize_t supported_xcp_configs_show(struct kobject *kobj,
774 					  struct kobj_attribute *attr, char *buf)
775 {
776 	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
777 	struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr;
778 	int size = 0, mode;
779 	char *sep = "";
780 
781 	if (!xcp_mgr || !xcp_mgr->supp_xcp_modes)
782 		return sysfs_emit(buf, "Not supported\n");
783 
784 	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
785 		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
786 		sep = ", ";
787 	}
788 
789 	size += sysfs_emit_at(buf, size, "\n");
790 
791 	return size;
792 }
793 
794 static ssize_t supported_nps_configs_show(struct kobject *kobj,
795 					  struct kobj_attribute *attr, char *buf)
796 {
797 	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
798 	int size = 0, mode;
799 	char *sep = "";
800 
801 	if (!xcp_cfg || !xcp_cfg->compatible_nps_modes)
802 		return sysfs_emit(buf, "Not supported\n");
803 
804 	for_each_inst(mode, xcp_cfg->compatible_nps_modes) {
805 		size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
806 		sep = ", ";
807 	}
808 
809 	size += sysfs_emit_at(buf, size, "\n");
810 
811 	return size;
812 }
813 
814 static ssize_t xcp_config_show(struct kobject *kobj,
815 			       struct kobj_attribute *attr, char *buf)
816 {
817 	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
818 
819 	return sysfs_emit(buf, "%s\n",
820 			  amdgpu_gfx_compute_mode_desc(xcp_cfg->mode));
821 }
822 
823 static ssize_t xcp_config_store(struct kobject *kobj,
824 				struct kobj_attribute *attr,
825 				const char *buf, size_t size)
826 {
827 	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
828 	int mode, r;
829 
830 	if (!strncasecmp("SPX", buf, strlen("SPX")))
831 		mode = AMDGPU_SPX_PARTITION_MODE;
832 	else if (!strncasecmp("DPX", buf, strlen("DPX")))
833 		mode = AMDGPU_DPX_PARTITION_MODE;
834 	else if (!strncasecmp("TPX", buf, strlen("TPX")))
835 		mode = AMDGPU_TPX_PARTITION_MODE;
836 	else if (!strncasecmp("QPX", buf, strlen("QPX")))
837 		mode = AMDGPU_QPX_PARTITION_MODE;
838 	else if (!strncasecmp("CPX", buf, strlen("CPX")))
839 		mode = AMDGPU_CPX_PARTITION_MODE;
840 	else
841 		return -EINVAL;
842 
843 	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
844 
845 	if (r)
846 		return r;
847 
848 	xcp_cfg->mode = mode;
849 	return size;
850 }
851 
852 static struct kobj_attribute xcp_cfg_sysfs_mode =
853 	__ATTR_RW_MODE(xcp_config, 0644);
854 
855 static void xcp_cfg_sysfs_release(struct kobject *kobj)
856 {
857 	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
858 
859 	kfree(xcp_cfg);
860 }
861 
862 static const struct kobj_type xcp_cfg_sysfs_ktype = {
863 	.release = xcp_cfg_sysfs_release,
864 	.sysfs_ops = &kobj_sysfs_ops,
865 };
866 
867 static struct kobj_attribute supp_part_sysfs_mode =
868 	__ATTR_RO(supported_xcp_configs);
869 
870 static struct kobj_attribute supp_nps_sysfs_mode =
871 	__ATTR_RO(supported_nps_configs);
872 
873 static const struct attribute *xcp_attrs[] = {
874 	&supp_part_sysfs_mode.attr,
875 	&xcp_cfg_sysfs_mode.attr,
876 	NULL,
877 };
878 
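/*
 * Create the compute_partition_config sysfs tree: the supported xcp/nps
 * config attributes, the writable xcp_config attribute and one kobject per
 * resource (xcc/dma/dec/jpeg) exposing num_inst and num_shared.
 */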
879 static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
880 {
881 	struct amdgpu_xcp_res_details *xcp_res;
882 	struct amdgpu_xcp_cfg *xcp_cfg;
883 	int i, r, j, rid, mode;
884 
885 	if (!adev->xcp_mgr)
886 		return;
887 
888 	xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
889 	if (!xcp_cfg)
890 		return;
891 	xcp_cfg->xcp_mgr = adev->xcp_mgr;
892 
893 	r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype,
894 				 &adev->dev->kobj, "compute_partition_config");
895 	if (r)
896 		goto err1;
897 
898 	r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs);
899 	if (r)
900 		goto err1;
901 
902 	if (adev->gmc.supported_nps_modes != 0) {
903 		r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
904 		if (r) {
905 			sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
906 			goto err1;
907 		}
908 	}
909 
910 	mode = (xcp_cfg->xcp_mgr->mode ==
911 		AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
912 		       AMDGPU_SPX_PARTITION_MODE :
913 		       xcp_cfg->xcp_mgr->mode;
914 	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
915 	if (r) {
916 		sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
917 		sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
918 		goto err1;
919 	}
920 
921 	xcp_cfg->mode = mode;
922 	for (i = 0; i < xcp_cfg->num_res; i++) {
923 		xcp_res = &xcp_cfg->xcp_res[i];
924 		rid = xcp_res->id;
925 		r = kobject_init_and_add(&xcp_res->kobj,
926 					 &xcp_cfg_res_sysfs_ktype,
927 					 &xcp_cfg->kobj, "%s",
928 					 xcp_res_names[rid]);
929 		if (r)
930 			goto err;
931 	}
932 
933 	adev->xcp_mgr->xcp_cfg = xcp_cfg;
934 	return;
935 err:
936 	for (j = 0; j < i; j++) {
937 		xcp_res = &xcp_cfg->xcp_res[j];
938 		kobject_put(&xcp_res->kobj);
939 	}
940 
941 	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
942 	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
943 err1:
944 	kobject_put(&xcp_cfg->kobj);
945 }
946 
947 static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
948 {
949 	struct amdgpu_xcp_res_details *xcp_res;
950 	struct amdgpu_xcp_cfg *xcp_cfg;
951 	int i;
952 
953 	if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
954 		return;
955 
956 	xcp_cfg = adev->xcp_mgr->xcp_cfg;
957 	for (i = 0; i < xcp_cfg->num_res; i++) {
958 		xcp_res = &xcp_cfg->xcp_res[i];
959 		kobject_put(&xcp_res->kobj);
960 	}
961 
962 	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
963 	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
964 	kobject_put(&xcp_cfg->kobj);
965 }
966 
967 /*====================== xcp sysfs - data entries ======================*/
968 
969 #define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)
970 
971 static ssize_t xcp_metrics_show(struct kobject *kobj,
972 				struct kobj_attribute *attr, char *buf)
973 {
974 	struct amdgpu_xcp *xcp = to_xcp(kobj);
975 	struct amdgpu_xcp_mgr *xcp_mgr;
976 	ssize_t size;
977 
978 	xcp_mgr = xcp->xcp_mgr;
979 	size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
980 	if (size <= 0)
981 		return size;
982 
983 	if (size > PAGE_SIZE)
984 		return -ENOSPC;
985 
986 	return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
987 }
988 
989 static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
990 					   struct attribute *attr, int n)
991 {
992 	struct amdgpu_xcp *xcp = to_xcp(kobj);
993 
994 	if (!xcp || !xcp->valid)
995 		return 0;
996 
997 	return attr->mode;
998 }
999 
1000 static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);
1001 
1002 static struct attribute *amdgpu_xcp_attrs[] = {
1003 	&xcp_sysfs_metrics.attr,
1004 	NULL,
1005 };
1006 
1007 static const struct attribute_group amdgpu_xcp_attrs_group = {
1008 	.attrs = amdgpu_xcp_attrs,
1009 	.is_visible = amdgpu_xcp_attrs_is_visible
1010 };
1011 
1012 static const struct kobj_type xcp_sysfs_ktype = {
1013 	.sysfs_ops = &kobj_sysfs_ops,
1014 };
1015 
1016 static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
1017 {
1018 	struct amdgpu_xcp *xcp;
1019 
1020 	for (n--; n >= 0; n--) {
1021 		xcp = &xcp_mgr->xcp[n];
1022 		if (!xcp->ddev || !xcp->valid)
1023 			continue;
1024 		sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
1025 		kobject_put(&xcp->kobj);
1026 	}
1027 }
1028 
1029 static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
1030 {
1031 	struct amdgpu_xcp *xcp;
1032 	int i, r;
1033 
1034 	for (i = 0; i < MAX_XCP; i++) {
1035 		/* Create one sysfs entry per partition drm node */
1036 		xcp = &xcp_mgr->xcp[i];
1037 		if (!xcp->ddev)
1038 			break;
1039 		r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
1040 					 &xcp->ddev->dev->kobj, "xcp");
1041 		if (r)
1042 			goto out;
1043 
1044 		r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
1045 		if (r)
1046 			goto out;
1047 	}
1048 
1049 	return;
1050 out:
1051 	kobject_put(&xcp->kobj);
1052 }
1053 
1054 static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
1055 {
1056 	struct amdgpu_xcp *xcp;
1057 	int i;
1058 
1059 	for (i = 0; i < MAX_XCP; i++) {
1060 		/* Refresh attribute visibility for each partition's sysfs entry */
1061 		xcp = &xcp_mgr->xcp[i];
1062 		if (!xcp->ddev)
1063 			continue;
1064 		sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
1065 	}
1066 
1067 	return;
1068 }
1069 
1070 void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
1071 {
1072 	if (!adev->xcp_mgr)
1073 		return;
1074 
1075 	amdgpu_xcp_cfg_sysfs_init(adev);
1076 
1077 	return;
1078 }
1079 
1080 void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
1081 {
1082 	if (!adev->xcp_mgr)
1083 		return;
1084 	amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
1085 	amdgpu_xcp_cfg_sysfs_fini(adev);
1086 }
1087