/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);

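/*
 * Dispatch one suspend/resume state transition to a single IP block within
 * a partition. The handler used (prepare_suspend/suspend/prepare_resume/
 * resume) is provided per IP by the XCP manager backend via ip_funcs.
 */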
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}

static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

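/*
 * Thin wrappers that walk every IP block of one partition through the
 * corresponding power state transition.
 */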
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;

	xcp->valid = true;
}

static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr,
				       int xcp_id)
{
	struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id];
	struct amdgpu_device *adev = xcp_mgr->adev;
	uint32_t inst_mask;
	uint64_t uid;
	int i;

	if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) &&
	    inst_mask) {
		i = GET_INST(GC, (ffs(inst_mask) - 1));
		uid = amdgpu_device_get_uid(xcp_mgr->adev->uid_info,
					    AMDGPU_UID_TYPE_XCD, i);
		if (uid)
			xcp->unique_id = uid;
	}
}

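/**
 * amdgpu_xcp_init - build the per-partition (XCP) bookkeeping
 * @xcp_mgr: XCP manager
 * @num_xcps: number of partitions the device is being split into
 * @mode: partition mode being configured
 *
 * Queries the backend for the IP blocks and memory id of each partition,
 * records them in xcp_mgr->xcp[], and refreshes the per-partition
 * scheduler lists.
 */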
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* This is needed to figure out the memory id of an xcp */
	xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				continue;
			else
				xcp_mgr->xcp[i].mem_id = mem_id;
		}
		__amdgpu_xcp_set_unique_id(xcp_mgr, i);
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);

	return 0;
}

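/*
 * Perform the actual mode switch under xcp_lock. The cached mode is set to
 * AMDGPU_XCP_MODE_TRANS for the duration of the switch so that concurrent
 * queries can tell a transition is in flight.
 */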
static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					      int mode)
{
	int ret, curr_mode, num_xcps = 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	curr_mode = xcp_mgr->mode;
	/* State set to transient mode */
	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

	if (ret) {
		/* Failed, get whatever mode it's at now */
		if (xcp_mgr->funcs->query_partition_mode)
			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
		else
			xcp_mgr->mode = curr_mode;

		goto out;
	}
	amdgpu_xcp_sysfs_entries_update(xcp_mgr);
out:
	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
}

int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}

static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return true;

	if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return true;

	if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
	    xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
		return true;

	return false;
}

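/**
 * amdgpu_xcp_query_partition_mode - get the current partition mode
 * @xcp_mgr: XCP manager
 * @flags: AMDGPU_XCP_FL_LOCKED if the caller already holds xcp_lock
 *
 * Returns the cached mode when it is known to be valid; otherwise asks the
 * backend and warns if the cached and device modes disagree outside of a
 * transition.
 */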
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int mode;

	if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
		return xcp_mgr->mode;

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_lock(&xcp_mgr->xcp_lock);
	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);

	/* First time query for VF, set the mode here */
	if (amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		xcp_mgr->mode = mode;

	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
		dev_WARN(
			xcp_mgr->adev->dev,
			"Cached partition mode %d does not match device mode %d",
			xcp_mgr->mode, mode);

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}

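/*
 * Allocate one DRM device per additional partition. Partition 0 reuses the
 * base adev DRM device; partitions 1..MAX_XCP-1 get their own nodes whose
 * IOCTLs are redirected to the primary device.
 */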
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct drm_device *ddev;
	int i, ret;

	ddev = adev_to_drm(adev);

	/* xcp #0 shares drm device setting with adev */
	adev->xcp_mgr->xcp->ddev = ddev;

	for (i = 1; i < MAX_XCP; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
		if (ret == -ENOSPC) {
			dev_warn(adev->dev,
				 "Skipping xcp node #%d: out of drm node resources.", i);
			ret = 0;
			goto out;
		} else if (ret) {
			goto out;
		}

		/* Redirect all IOCTLs to the primary device */
		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		p_ddev->driver = &amdgpu_partition_driver;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;

		dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
	}
	ret = 0;
out:
	amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);

	return ret;
}

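/**
 * amdgpu_xcp_mgr_init - allocate and initialize the XCP manager
 * @adev: amdgpu device
 * @init_mode: initial partition mode
 * @init_num_xcps: initial number of partitions
 * @xcp_funcs: backend callbacks; get_ip_details is mandatory
 *
 * Allocates the manager, runs amdgpu_xcp_init() when the initial mode is
 * already known, and allocates the per-partition DRM devices.
 */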
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;
	int i;

	if (!xcp_funcs || !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	adev->xcp_mgr = xcp_mgr;
	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}

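/**
 * amdgpu_xcp_get_partition - find which partitions own an IP instance
 * @xcp_mgr: XCP manager
 * @ip: IP block type
 * @instance: instance number of the IP block
 *
 * Returns a bitmask of partition ids (BIT(xcp_id)) whose @ip inst_mask
 * contains @instance; e.g. a return of 0x3 means the instance is visible
 * to partitions 0 and 1. Returns -ENXIO if no partition matches.
 */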
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;

	return id_mask;
}

int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask)
{
	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
		return -EINVAL;

	*inst_mask = xcp->ip[ip].inst_mask;

	return 0;
}

int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			    const struct pci_device_id *ent)
{
	int i, ret;

	if (!adev->xcp_mgr)
		return 0;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
		if (ret)
			return ret;
	}

	return 0;
}

void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		p_ddev = adev->xcp_mgr->xcp[i].ddev;
		drm_dev_unplug(p_ddev);
		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
		p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
		amdgpu_xcp_drm_dev_free(p_ddev);
	}
}

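/*
 * Map an opened render node back to its partition: the fpriv inherits the
 * partition id and the memory id of the matching XCP. Opens that do not
 * match any partition node fall back to AMDGPU_XCP_NO_PARTITION.
 */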
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv)
{
	int i;

	if (!adev->xcp_mgr)
		return 0;

	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
	for (i = 0; i < MAX_XCP; ++i) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			if (!adev->xcp_mgr->xcp[i].valid) {
				dev_err(adev->dev, "renderD%d partition %d not valid!",
					file_priv->minor->index, i);
				return -ENOENT;
			}
			dev_dbg(adev->dev, "renderD%d partition %d opened!",
				file_priv->minor->index, i);
			fpriv->xcp_id = i;
			break;
		}
	}

	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
	return 0;
}

void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (drm_sched_wqueue_ready(sched)) {
		ring = to_amdgpu_ring(entity->entity.rq->sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}

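/*
 * Pick the scheduler list for a new context. Files without an explicit
 * partition are load-balanced onto the partition with the lowest context
 * reference count.
 */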
int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
			     u32 hw_ip, u32 hw_prio,
			     struct amdgpu_fpriv *fpriv,
			     unsigned int *num_scheds,
			     struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;
	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds =
			xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds =
			xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
	} else {
		dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

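/*
 * Derive a ring's partition id from its type and instance: the ring type is
 * mapped to an XCP IP block, then the first partition whose inst_mask
 * contains the instance wins.
 */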
static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
			      uint32_t inst_idx,
			      struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
	if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
	case AMDGPU_RING_TYPE_MES:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		break;
	default:
		dev_err(adev->dev, "Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			dev_dbg(adev->dev, "ring: %s, xcp_id: %u", ring->name,
				ring->xcp_id);
			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
			break;
		}
	}
}

static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
					struct amdgpu_ring *ring,
					unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
		.sched[(*num_gpu_sched)++] = &ring->sched;
	dev_dbg(adev->dev, "%s: [%d] gpu_sched[%d][%d] = %d",
		ring->name, sel_xcp_id, ring->funcs->type,
		ring->hw_prio, *num_gpu_sched);
}

static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX mode in
		 * certain configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
			amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
		else
			amdgpu_set_xcp_id(adev, ring->me, ring);
	}

	return amdgpu_xcp_sched_list_update(adev);
}

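/*
 * Supported compute partition modes are a function of the XCC count:
 * 8 XCCs allow SPX/DPX/QPX/CPX, 6 XCCs allow SPX/TPX/CPX, 4 XCCs allow
 * SPX/DPX/CPX, and 1 or 2 XCCs still support SPX and CPX.
 */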
void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	xcp_mgr->supp_xcp_modes = 0;

	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_QPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 6:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_TPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 4:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 2:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 1:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;

	default:
		break;
	}
}

int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

/*====================== xcp sysfs - configuration ======================*/
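/*
 * XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst), for example, expands to a show
 * handler named amdgpu_xcp_res_sysfs_num_inst_show() that emits the
 * corresponding field of struct amdgpu_xcp_res_details.
 */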
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                         \
	static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(        \
		struct amdgpu_xcp_res_details *xcp_res, char *buf) \
	{                                                          \
		return sysfs_emit(buf, "%d\n", xcp_res->_name);    \
	}

struct amdgpu_xcp_res_sysfs_attribute {
	struct attribute attr;
	ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf);
};

#define XCP_CFG_SYSFS_RES_ATTR(_name)                                        \
	struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \
		.attr = { .name = __stringify(_name), .mode = 0400 },        \
		.show = amdgpu_xcp_res_sysfs_##_name##_show,                 \
	}

XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst)
XCP_CFG_SYSFS_RES_ATTR(num_inst);
XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared)
XCP_CFG_SYSFS_RES_ATTR(num_shared);

#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr

static struct attribute *xcp_cfg_res_sysfs_attrs[] = {
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst),
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL
};

static const char *xcp_desc[] = {
	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
};

static const char *nps_desc[] = {
	[UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN",
	[AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
	[AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
	[AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
	[AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
	[AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
	[AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
};

ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);

#define to_xcp_attr(x) \
	container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr)
#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj)

static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj,
					   struct attribute *attr, char *buf)
{
	struct amdgpu_xcp_res_sysfs_attribute *attribute;
	struct amdgpu_xcp_res_details *xcp_res;

	attribute = to_xcp_attr(attr);
	xcp_res = to_xcp_res(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(xcp_res, buf);
}

static const struct sysfs_ops xcp_cfg_res_sysfs_ops = {
	.show = xcp_cfg_res_sysfs_attr_show,
};

static const struct kobj_type xcp_cfg_res_sysfs_ktype = {
	.sysfs_ops = &xcp_cfg_res_sysfs_ops,
	.default_groups = xcp_cfg_res_sysfs_groups,
};

static const char *xcp_res_names[] = {
	[AMDGPU_XCP_RES_XCC] = "xcc",
	[AMDGPU_XCP_RES_DMA] = "dma",
	[AMDGPU_XCP_RES_DEC] = "dec",
	[AMDGPU_XCP_RES_JPEG] = "jpeg",
};

static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
				   int mode,
				   struct amdgpu_xcp_cfg *xcp_cfg)
{
	if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info)
		return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg);

	return -EOPNOTSUPP;
}

#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj)
static ssize_t supported_xcp_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr;
	int size = 0, mode;
	char *sep = "";

	if (!xcp_mgr || !xcp_mgr->supp_xcp_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t supported_nps_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int size = 0, mode;
	char *sep = "";

	if (!xcp_cfg || !xcp_cfg->compatible_nps_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_cfg->compatible_nps_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t xcp_config_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	return sysfs_emit(buf, "%s\n",
			  amdgpu_gfx_compute_mode_desc(xcp_cfg->mode));
}

static ssize_t xcp_config_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int mode, r;

	if (!strncasecmp("SPX", buf, strlen("SPX")))
		mode = AMDGPU_SPX_PARTITION_MODE;
	else if (!strncasecmp("DPX", buf, strlen("DPX")))
		mode = AMDGPU_DPX_PARTITION_MODE;
	else if (!strncasecmp("TPX", buf, strlen("TPX")))
		mode = AMDGPU_TPX_PARTITION_MODE;
	else if (!strncasecmp("QPX", buf, strlen("QPX")))
		mode = AMDGPU_QPX_PARTITION_MODE;
	else if (!strncasecmp("CPX", buf, strlen("CPX")))
		mode = AMDGPU_CPX_PARTITION_MODE;
	else
		return -EINVAL;

	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);

	if (r)
		return r;

	xcp_cfg->mode = mode;
	return size;
}

static struct kobj_attribute xcp_cfg_sysfs_mode =
	__ATTR_RW_MODE(xcp_config, 0644);

static void xcp_cfg_sysfs_release(struct kobject *kobj)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	kfree(xcp_cfg);
}

static const struct kobj_type xcp_cfg_sysfs_ktype = {
	.release = xcp_cfg_sysfs_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_attribute supp_part_sysfs_mode =
	__ATTR_RO(supported_xcp_configs);

static struct kobj_attribute supp_nps_sysfs_mode =
	__ATTR_RO(supported_nps_configs);

static const struct attribute *xcp_attrs[] = {
	&supp_part_sysfs_mode.attr,
	&xcp_cfg_sysfs_mode.attr,
	NULL,
};

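/*
 * Create the "compute_partition_config" directory under the device sysfs
 * node, with one sub-directory per partitionable resource (xcc, dma, dec,
 * jpeg) exposing its num_inst/num_shared counts.
 */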
static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i, r, j, rid, mode;

	if (!adev->xcp_mgr)
		return;

	xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
	if (!xcp_cfg)
		return;
	xcp_cfg->xcp_mgr = adev->xcp_mgr;

	r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype,
				 &adev->dev->kobj, "compute_partition_config");
	if (r)
		goto err1;

	r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs);
	if (r)
		goto err1;

	if (adev->gmc.supported_nps_modes != 0) {
		r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		if (r) {
			sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
			goto err1;
		}
	}

	mode = (xcp_cfg->xcp_mgr->mode ==
		AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
		       AMDGPU_SPX_PARTITION_MODE :
		       xcp_cfg->xcp_mgr->mode;
	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
	if (r) {
		sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
		goto err1;
	}

	xcp_cfg->mode = mode;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		rid = xcp_res->id;
		r = kobject_init_and_add(&xcp_res->kobj,
					 &xcp_cfg_res_sysfs_ktype,
					 &xcp_cfg->kobj, "%s",
					 xcp_res_names[rid]);
		if (r)
			goto err;
	}

	adev->xcp_mgr->xcp_cfg = xcp_cfg;
	return;
err:
	for (j = 0; j < i; j++) {
		xcp_res = &xcp_cfg->xcp_res[j];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
err1:
	kobject_put(&xcp_cfg->kobj);
}

static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i;

	if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
		return;

	xcp_cfg = adev->xcp_mgr->xcp_cfg;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
	kobject_put(&xcp_cfg->kobj);
}

/*====================== xcp sysfs - data entries ======================*/

#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)

static ssize_t xcp_metrics_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp *xcp = to_xcp(kobj);
	struct amdgpu_xcp_mgr *xcp_mgr;
	ssize_t size;

	xcp_mgr = xcp->xcp_mgr;
	size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
	if (size <= 0)
		return size;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
}

static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct amdgpu_xcp *xcp = to_xcp(kobj);

	if (!xcp || !xcp->valid)
		return 0;

	return attr->mode;
}

static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);

static struct attribute *amdgpu_xcp_attrs[] = {
	&xcp_sysfs_metrics.attr,
	NULL,
};

static const struct attribute_group amdgpu_xcp_attrs_group = {
	.attrs = amdgpu_xcp_attrs,
	.is_visible = amdgpu_xcp_attrs_is_visible
};

static const struct kobj_type xcp_sysfs_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
};

static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
{
	struct amdgpu_xcp *xcp;

	for (n--; n >= 0; n--) {
		xcp = &xcp_mgr->xcp[n];
		if (!xcp->ddev || !xcp->valid)
			continue;
		sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		kobject_put(&xcp->kobj);
	}
}

static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i, r;

	/* Add an "xcp" kobject with the metrics group under each
	 * partition's device node.
	 */
	for (i = 0; i < MAX_XCP; i++) {
		xcp = &xcp_mgr->xcp[i];
		if (!xcp->ddev)
			break;
		r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
					 &xcp->ddev->dev->kobj, "xcp");
		if (r)
			goto out;

		r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		if (r)
			goto out;
	}

	return;
out:
	kobject_put(&xcp->kobj);
}

static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i;

	/* Re-evaluate attribute visibility after a partition mode switch */
	for (i = 0; i < MAX_XCP; i++) {
		xcp = &xcp_mgr->xcp[i];
		if (!xcp->ddev)
			continue;
		sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
	}
}

void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
{
	if (!adev->xcp_mgr)
		return;

	amdgpu_xcp_cfg_sysfs_init(adev);
}

void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
{
	if (!adev->xcp_mgr)
		return;
	amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
	amdgpu_xcp_cfg_sysfs_fini(adev);
}