/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);

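/*
 * Dispatch the per-IP callback that matches the requested transition
 * (prepare_suspend, suspend, prepare_resume or resume) for one IP block of
 * an accelerated compute partition (XCP).
 */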
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}

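/* Walk every IP block of the given XCP and apply the requested transition. */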
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

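/* Record one IP block's details in the XCP and mark block and XCP valid. */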
static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;

	xcp->valid = true;
}

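/*
 * Build the XCP table for the requested partition mode: query the per-block
 * IP details and the memory partition id of each XCP, then refresh the
 * partition-aware scheduler lists.
 */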
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* This is needed for figuring out memory id of xcp */
	xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				continue;
			else
				xcp_mgr->xcp[i].mem_id = mem_id;
		}
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);

	return 0;
}

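/*
 * Switch to a new partition mode under the xcp lock.  The manager is put
 * into AMDGPU_XCP_MODE_TRANS while the hardware callback runs; on failure
 * the previous (or freshly queried) mode is restored, on success the
 * per-XCP sysfs entries are refreshed.
 */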
static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					      int mode)
{
	int ret, curr_mode, num_xcps = 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	curr_mode = xcp_mgr->mode;
	/* State set to transient mode */
	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

	if (ret) {
		/* Failed, get whatever mode it's at now */
		if (xcp_mgr->funcs->query_partition_mode)
			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
		else
			xcp_mgr->mode = curr_mode;

		goto out;
	}
	amdgpu_xcp_sysfs_entries_update(xcp_mgr);
out:
	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
}

int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}

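/*
 * Query the partition mode from the device and sanity-check it against the
 * cached mode.  Callers that already hold xcp_lock pass AMDGPU_XCP_FL_LOCKED
 * in @flags.
 */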
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int mode;

	if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return xcp_mgr->mode;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return xcp_mgr->mode;

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_lock(&xcp_mgr->xcp_lock);
	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);

	/* First time query for VF, set the mode here */
	if (amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		xcp_mgr->mode = mode;

	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
		dev_WARN(
			xcp_mgr->adev->dev,
			"Cached partition mode %d not matching with device mode %d",
			xcp_mgr->mode, mode);

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}

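/*
 * Allocate one DRM device node per XCP.  XCP 0 reuses the primary DRM
 * device; the remaining nodes have their original minors, driver and VMA
 * offset manager saved so that all IOCTLs can be redirected to the primary
 * device and restored again on unplug.
 */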
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct drm_device *ddev;
	int i, ret;

	ddev = adev_to_drm(adev);

	/* xcp #0 shares drm device setting with adev */
	adev->xcp_mgr->xcp->ddev = ddev;

	for (i = 1; i < MAX_XCP; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
		if (ret == -ENOSPC) {
			dev_warn(adev->dev,
				 "Skip xcp node #%d when out of drm node resource.", i);
			ret = 0;
			goto out;
		} else if (ret) {
			goto out;
		}

		/* Redirect all IOCTLs to the primary device */
		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		p_ddev->driver = &amdgpu_partition_driver;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;

		dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
	}
	ret = 0;
out:
	amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);

	return ret;
}

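/*
 * Allocate and initialize the XCP manager for a device.  When an initial
 * partition mode is given, the XCP table is built immediately, and the DRM
 * device nodes for all partitions are allocated.
 */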
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;
	int i;

	if (!xcp_funcs || !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);

	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	adev->xcp_mgr = xcp_mgr;
	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}

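/*
 * Return a bitmask of XCP ids whose IP block @ip contains @instance, or
 * -ENXIO if no partition owns that instance.
 */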
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;

	return id_mask;
}

int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask)
{
	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
		return -EINVAL;

	*inst_mask = xcp->ip[ip].inst_mask;

	return 0;
}

int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			    const struct pci_device_id *ent)
{
	int i, ret;

	if (!adev->xcp_mgr)
		return 0;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
		if (ret)
			return ret;
	}

	return 0;
}

void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		p_ddev = adev->xcp_mgr->xcp[i].ddev;
		drm_dev_unplug(p_ddev);
		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
		p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
	}
}

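/*
 * Resolve which partition a DRM client opened by matching its render minor
 * against the per-XCP DRM devices, and record the partition id and its
 * memory partition id in the file private data.
 */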
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv)
{
	int i;

	if (!adev->xcp_mgr)
		return 0;

	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
	for (i = 0; i < MAX_XCP; ++i) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			if (adev->xcp_mgr->xcp[i].valid == FALSE) {
				dev_err(adev->dev, "renderD%d partition %d not valid!",
					file_priv->minor->index, i);
				return -ENOENT;
			}
			dev_dbg(adev->dev, "renderD%d partition %d opened!",
				file_priv->minor->index, i);
			fpriv->xcp_id = i;
			break;
		}
	}

	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
	return 0;
}

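/*
 * Decrement the partition reference count for the ring backing a context
 * entity, provided its scheduler work queue is still ready.
 */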
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (drm_sched_wqueue_ready(sched)) {
		ring = to_amdgpu_ring(entity->entity.rq->sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}

/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                          \
	static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(         \
		struct amdgpu_xcp_res_details *xcp_res, char *buf)  \
	{                                                           \
		return sysfs_emit(buf, "%d\n", xcp_res->_name);     \
	}

struct amdgpu_xcp_res_sysfs_attribute {
	struct attribute attr;
	ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf);
};

#define XCP_CFG_SYSFS_RES_ATTR(_name)                                         \
	struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \
		.attr = { .name = __stringify(_name), .mode = 0400 },        \
		.show = amdgpu_xcp_res_sysfs_##_name##_show,                  \
	}

XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst)
XCP_CFG_SYSFS_RES_ATTR(num_inst);
XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared)
XCP_CFG_SYSFS_RES_ATTR(num_shared);

#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr

static struct attribute *xcp_cfg_res_sysfs_attrs[] = {
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst),
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL
};

static const char *xcp_desc[] = {
	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
};

static const char *nps_desc[] = {
	[UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN",
	[AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
	[AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
	[AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
	[AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
	[AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
	[AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
};

ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);

#define to_xcp_attr(x) \
	container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr)
#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj)

static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj,
					   struct attribute *attr, char *buf)
{
	struct amdgpu_xcp_res_sysfs_attribute *attribute;
	struct amdgpu_xcp_res_details *xcp_res;

	attribute = to_xcp_attr(attr);
	xcp_res = to_xcp_res(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(xcp_res, buf);
}

static const struct sysfs_ops xcp_cfg_res_sysfs_ops = {
	.show = xcp_cfg_res_sysfs_attr_show,
};

static const struct kobj_type xcp_cfg_res_sysfs_ktype = {
	.sysfs_ops = &xcp_cfg_res_sysfs_ops,
	.default_groups = xcp_cfg_res_sysfs_groups,
};

const char *xcp_res_names[] = {
	[AMDGPU_XCP_RES_XCC] = "xcc",
	[AMDGPU_XCP_RES_DMA] = "dma",
	[AMDGPU_XCP_RES_DEC] = "dec",
	[AMDGPU_XCP_RES_JPEG] = "jpeg",
};

static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
				   int mode,
				   struct amdgpu_xcp_cfg *xcp_cfg)
{
	if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info)
		return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg);

	return -EOPNOTSUPP;
}

#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj)
static ssize_t supported_xcp_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr;
	int size = 0, mode;
	char *sep = "";

	if (!xcp_mgr || !xcp_mgr->supp_xcp_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t supported_nps_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int size = 0, mode;
	char *sep = "";

	if (!xcp_cfg || !xcp_cfg->compatible_nps_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_cfg->compatible_nps_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t xcp_config_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	return sysfs_emit(buf, "%s\n",
			  amdgpu_gfx_compute_mode_desc(xcp_cfg->mode));
}

static ssize_t xcp_config_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int mode, r;

	if (!strncasecmp("SPX", buf, strlen("SPX")))
		mode = AMDGPU_SPX_PARTITION_MODE;
	else if (!strncasecmp("DPX", buf, strlen("DPX")))
		mode = AMDGPU_DPX_PARTITION_MODE;
	else if (!strncasecmp("TPX", buf, strlen("TPX")))
		mode = AMDGPU_TPX_PARTITION_MODE;
	else if (!strncasecmp("QPX", buf, strlen("QPX")))
		mode = AMDGPU_QPX_PARTITION_MODE;
	else if (!strncasecmp("CPX", buf, strlen("CPX")))
		mode = AMDGPU_CPX_PARTITION_MODE;
	else
		return -EINVAL;

	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);

	if (r)
		return r;

	xcp_cfg->mode = mode;
	return size;
}

static struct kobj_attribute xcp_cfg_sysfs_mode =
	__ATTR_RW_MODE(xcp_config, 0644);

static void xcp_cfg_sysfs_release(struct kobject *kobj)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	kfree(xcp_cfg);
}

static const struct kobj_type xcp_cfg_sysfs_ktype = {
	.release = xcp_cfg_sysfs_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_attribute supp_part_sysfs_mode =
	__ATTR_RO(supported_xcp_configs);

static struct kobj_attribute supp_nps_sysfs_mode =
	__ATTR_RO(supported_nps_configs);

static const struct attribute *xcp_attrs[] = {
	&supp_part_sysfs_mode.attr,
	&xcp_cfg_sysfs_mode.attr,
	NULL,
};

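/*
 * Create the compute_partition_config sysfs tree: the config kobject, its
 * attribute files, and one child kobject per partitionable resource for the
 * current (or default SPX) compute partition mode.
 */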
static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i, r, j, rid, mode;

	if (!adev->xcp_mgr)
		return;

	xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
	if (!xcp_cfg)
		return;
	xcp_cfg->xcp_mgr = adev->xcp_mgr;

	r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype,
				 &adev->dev->kobj, "compute_partition_config");
	if (r)
		goto err1;

	r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs);
	if (r)
		goto err1;

	if (adev->gmc.supported_nps_modes != 0) {
		r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		if (r) {
			sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
			goto err1;
		}
	}

	mode = (xcp_cfg->xcp_mgr->mode ==
		AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
		       AMDGPU_SPX_PARTITION_MODE :
		       xcp_cfg->xcp_mgr->mode;
	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
	if (r) {
		sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
		goto err1;
	}

	xcp_cfg->mode = mode;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		rid = xcp_res->id;
		r = kobject_init_and_add(&xcp_res->kobj,
					 &xcp_cfg_res_sysfs_ktype,
					 &xcp_cfg->kobj, "%s",
					 xcp_res_names[rid]);
		if (r)
			goto err;
	}

	adev->xcp_mgr->xcp_cfg = xcp_cfg;
	return;
err:
	/* Drop the resource kobjects that were added successfully */
	for (j = 0; j < i; j++) {
		xcp_res = &xcp_cfg->xcp_res[j];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
err1:
	kobject_put(&xcp_cfg->kobj);
}

static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i;

	if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
		return;

	xcp_cfg = adev->xcp_mgr->xcp_cfg;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
	kobject_put(&xcp_cfg->kobj);
}

/*====================== xcp sysfs - data entries ======================*/

#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)

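/*
 * Emit the DPM metrics of one partition.  The size is queried first with a
 * NULL buffer so that a payload larger than one sysfs page is rejected
 * before it is copied out.
 */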
static ssize_t xcp_metrics_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp *xcp = to_xcp(kobj);
	struct amdgpu_xcp_mgr *xcp_mgr;
	ssize_t size;

	xcp_mgr = xcp->xcp_mgr;
	size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
	if (size <= 0)
		return size;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
}

static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct amdgpu_xcp *xcp = to_xcp(kobj);

	if (!xcp || !xcp->valid)
		return 0;

	return attr->mode;
}

static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);

static struct attribute *amdgpu_xcp_attrs[] = {
	&xcp_sysfs_metrics.attr,
	NULL,
};

static const struct attribute_group amdgpu_xcp_attrs_group = {
	.attrs = amdgpu_xcp_attrs,
	.is_visible = amdgpu_xcp_attrs_is_visible
};

static const struct kobj_type xcp_sysfs_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
};

static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
{
	struct amdgpu_xcp *xcp;

	for (n--; n >= 0; n--) {
		xcp = &xcp_mgr->xcp[n];
		if (!xcp->ddev || !xcp->valid)
			continue;
		sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		kobject_put(&xcp->kobj);
	}
}

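/*
 * Create an "xcp" kobject and its attribute group under each partition's
 * DRM device node; the attributes stay hidden while the partition is not
 * valid (see amdgpu_xcp_attrs_is_visible).
 */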
static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i, r;

	for (i = 0; i < MAX_XCP; i++) {
		xcp = &xcp_mgr->xcp[i];
		if (!xcp->ddev)
			break;
		r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
					 &xcp->ddev->dev->kobj, "xcp");
		if (r)
			goto out;

		r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		if (r)
			goto out;
	}

	return;
out:
	kobject_put(&xcp->kobj);
}

static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		xcp = &xcp_mgr->xcp[i];
		if (!xcp->ddev)
			continue;
		sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
	}
}

void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
{
	if (!adev->xcp_mgr)
		return;

	amdgpu_xcp_cfg_sysfs_init(adev);
}

void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
{
	if (!adev->xcp_mgr)
		return;

	amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
	amdgpu_xcp_cfg_sysfs_fini(adev);
}