/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

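/*
 * Run a single state-transition callback for one IP block of a partition.
 * Maps the requested xcp_state to the matching ip_funcs hook
 * (prepare_suspend/suspend/prepare_resume/resume) and calls it with the
 * block's instance mask. Returns 0 if the block is invalid or has no
 * callback for the requested state.
 */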
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
                            struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
        int (*run_func)(void *handle, uint32_t inst_mask);
        int ret = 0;

        if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
                return 0;

        run_func = NULL;

        switch (xcp_state) {
        case AMDGPU_XCP_PREPARE_SUSPEND:
                run_func = xcp_ip->ip_funcs->prepare_suspend;
                break;
        case AMDGPU_XCP_SUSPEND:
                run_func = xcp_ip->ip_funcs->suspend;
                break;
        case AMDGPU_XCP_PREPARE_RESUME:
                run_func = xcp_ip->ip_funcs->prepare_resume;
                break;
        case AMDGPU_XCP_RESUME:
                run_func = xcp_ip->ip_funcs->resume;
                break;
        }

        if (run_func)
                ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

        return ret;
}

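/*
 * Apply one state transition to every IP block of the given partition.
 * Returns -EINVAL for an out-of-range or invalid xcp_id and stops at the
 * first block that reports an error.
 */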
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                     int state)
{
        struct amdgpu_xcp_ip *xcp_ip;
        struct amdgpu_xcp *xcp;
        int i, ret;

        if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
                return -EINVAL;

        xcp = &xcp_mgr->xcp[xcp_id];
        for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
                xcp_ip = &xcp->ip[i];
                ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
                if (ret)
                        break;
        }

        return ret;
}

int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
        return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
                                         AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
        return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
        return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
                                         AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
        return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

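/*
 * Copy the IP block description into the partition's ip[] table and mark
 * both the block and the partition as valid.
 */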
static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                   struct amdgpu_xcp_ip *ip)
{
        struct amdgpu_xcp *xcp;

        if (!ip)
                return;

        xcp = &xcp_mgr->xcp[xcp_id];
        xcp->ip[ip->ip_id] = *ip;
        xcp->ip[ip->ip_id].valid = true;

        xcp->valid = true;
}

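/*
 * (Re)build the partition table for the requested mode: query the IP
 * details of every block in each of the num_xcps partitions, derive the
 * memory partition id when the backend provides get_xcp_mem_id, and
 * refresh the per-partition scheduler lists.
 */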
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        struct amdgpu_xcp_ip ip;
        uint8_t mem_id;
        int i, j, ret;

        if (!num_xcps || num_xcps > MAX_XCP)
                return -EINVAL;

        xcp_mgr->mode = mode;

        for (i = 0; i < MAX_XCP; ++i)
                xcp_mgr->xcp[i].valid = false;

        /* This is needed for figuring out memory id of xcp */
        xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;

        for (i = 0; i < num_xcps; ++i) {
                for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
                        ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
                                                             &ip);
                        if (ret)
                                continue;

                        __amdgpu_xcp_add_block(xcp_mgr, i, &ip);
                }

                xcp_mgr->xcp[i].id = i;

                if (xcp_mgr->funcs->get_xcp_mem_id) {
                        ret = xcp_mgr->funcs->get_xcp_mem_id(
                                xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
                        if (ret)
                                continue;
                        else
                                xcp_mgr->xcp[i].mem_id = mem_id;
                }
        }

        xcp_mgr->num_xcps = num_xcps;
        amdgpu_xcp_update_partition_sched_list(adev);

        return 0;
}

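/*
 * Ask the backend to switch the partition mode under xcp_lock. The cached
 * mode is parked at AMDGPU_XCP_MODE_TRANS for the duration of the switch;
 * on failure it is restored either from the hardware (via
 * query_partition_mode) or from the previous cached value.
 */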
static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
                                              int mode)
{
        int ret, curr_mode, num_xcps = 0;

        if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
                return 0;

        mutex_lock(&xcp_mgr->xcp_lock);

        curr_mode = xcp_mgr->mode;
        /* State set to transient mode */
        xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

        ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

        if (ret) {
                /* Failed, get whatever mode it's at now */
                if (xcp_mgr->funcs->query_partition_mode)
                        xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
                                xcp_mgr, AMDGPU_XCP_FL_LOCKED);
                else
                        xcp_mgr->mode = curr_mode;

                goto out;
        }

out:
        mutex_unlock(&xcp_mgr->xcp_lock);

        return ret;
}

int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
        if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
                return -EINVAL;

        if (xcp_mgr->mode == mode)
                return 0;

        return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
}

int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
        if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return 0;

        return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}

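/*
 * Return the current partition mode. On bare metal with no mode set, or
 * when the backend has no query hook, the cached value is returned as-is.
 * Otherwise the hardware is queried (taking xcp_lock unless the caller
 * passed AMDGPU_XCP_FL_LOCKED), a VF caches the first queried value, and a
 * mismatch against the cached mode outside a transition triggers a warning.
 */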
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
        int mode;

        if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
            xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return xcp_mgr->mode;

        if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
                return xcp_mgr->mode;

        if (!(flags & AMDGPU_XCP_FL_LOCKED))
                mutex_lock(&xcp_mgr->xcp_lock);
        mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);

        /* First time query for VF, set the mode here */
        if (amdgpu_sriov_vf(xcp_mgr->adev) &&
            xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                xcp_mgr->mode = mode;

        if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
                dev_WARN(
                        xcp_mgr->adev->dev,
                        "Cached partition mode %d not matching with device mode %d",
                        xcp_mgr->mode, mode);

        if (!(flags & AMDGPU_XCP_FL_LOCKED))
                mutex_unlock(&xcp_mgr->xcp_lock);

        return mode;
}

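/*
 * Allocate the per-partition DRM devices. Partition 0 reuses the adev's
 * primary drm_device; partitions 1..MAX_XCP-1 each get a device from
 * amdgpu_xcp_drm_dev_alloc(). The original render/primary minors, driver
 * and vma_offset_manager are stashed in the xcp so they can be restored on
 * unplug, while the new nodes are redirected to the primary device so all
 * IOCTLs land there. Running out of DRM minors is not fatal; the remaining
 * partitions are simply skipped.
 */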
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
        struct drm_device *p_ddev;
        struct drm_device *ddev;
        int i, ret;

        ddev = adev_to_drm(adev);

        /* xcp #0 shares drm device setting with adev */
        adev->xcp_mgr->xcp->ddev = ddev;

        for (i = 1; i < MAX_XCP; i++) {
                ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
                if (ret == -ENOSPC) {
                        dev_warn(adev->dev,
                                 "Skip xcp node #%d when out of drm node resource.", i);
                        return 0;
                } else if (ret) {
                        return ret;
                }

                /* Redirect all IOCTLs to the primary device */
                adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
                adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
                adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
                adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
                p_ddev->render->dev = ddev;
                p_ddev->primary->dev = ddev;
                p_ddev->vma_offset_manager = ddev->vma_offset_manager;
                p_ddev->driver = &amdgpu_partition_driver;
                adev->xcp_mgr->xcp[i].ddev = p_ddev;
        }

        return 0;
}

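/*
 * Allocate and initialise the XCP manager for a device. get_ip_details is
 * the one mandatory backend callback; when an initial mode is given, the
 * partition table is built right away, and the per-partition DRM devices
 * are allocated last.
 */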
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
                        int init_num_xcps,
                        struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
        struct amdgpu_xcp_mgr *xcp_mgr;

        if (!xcp_funcs || !xcp_funcs->get_ip_details)
                return -EINVAL;

        xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);

        if (!xcp_mgr)
                return -ENOMEM;

        xcp_mgr->adev = adev;
        xcp_mgr->funcs = xcp_funcs;
        xcp_mgr->mode = init_mode;
        mutex_init(&xcp_mgr->xcp_lock);

        if (init_mode != AMDGPU_XCP_MODE_NONE)
                amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

        adev->xcp_mgr = xcp_mgr;

        return amdgpu_xcp_dev_alloc(adev);
}

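/*
 * Return a bitmask of partition ids whose copy of the given IP block
 * contains the requested instance, or -ENXIO if no partition matches.
 */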
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
                             enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
        struct amdgpu_xcp *xcp;
        int i, id_mask = 0;

        if (ip >= AMDGPU_XCP_MAX_BLOCKS)
                return -EINVAL;

        for (i = 0; i < xcp_mgr->num_xcps; ++i) {
                xcp = &xcp_mgr->xcp[i];
                if ((xcp->valid) && (xcp->ip[ip].valid) &&
                    (xcp->ip[ip].inst_mask & BIT(instance)))
                        id_mask |= BIT(i);
        }

        if (!id_mask)
                id_mask = -ENXIO;

        return id_mask;
}

int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
                                enum AMDGPU_XCP_IP_BLOCK ip,
                                uint32_t *inst_mask)
{
        if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
                return -EINVAL;

        *inst_mask = xcp->ip[ip].inst_mask;

        return 0;
}

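/*
 * Register the DRM devices of partitions 1..n that were set up by
 * amdgpu_xcp_dev_alloc(); partition 0 shares the adev's primary DRM device
 * and is not handled here.
 */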
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
                            const struct pci_device_id *ent)
{
        int i, ret;

        if (!adev->xcp_mgr)
                return 0;

        for (i = 1; i < MAX_XCP; i++) {
                if (!adev->xcp_mgr->xcp[i].ddev)
                        break;

                ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
                if (ret)
                        return ret;
        }

        return 0;
}

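/*
 * Unplug the partition DRM devices and restore the render/primary minors,
 * driver and vma_offset_manager that were saved in amdgpu_xcp_dev_alloc(),
 * so the devices are torn down with their original state.
 */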
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
        struct drm_device *p_ddev;
        int i;

        if (!adev->xcp_mgr)
                return;

        for (i = 1; i < MAX_XCP; i++) {
                if (!adev->xcp_mgr->xcp[i].ddev)
                        break;

                p_ddev = adev->xcp_mgr->xcp[i].ddev;
                drm_dev_unplug(p_ddev);
                p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
                p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
                p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
                p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
        }
}

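/*
 * Resolve which partition a freshly opened DRM file belongs to by matching
 * its render minor against the per-partition devices, record the xcp_id in
 * the fpriv, and pick the matching memory partition for its VM. Files that
 * do not map to any partition get AMDGPU_XCP_NO_PARTITION and mem_id -1.
 */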
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
                           struct amdgpu_fpriv *fpriv,
                           struct drm_file *file_priv)
{
        int i;

        if (!adev->xcp_mgr)
                return 0;

        fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
        for (i = 0; i < MAX_XCP; ++i) {
                if (!adev->xcp_mgr->xcp[i].ddev)
                        break;

                if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
                        if (!adev->xcp_mgr->xcp[i].valid) {
                                dev_err(adev->dev, "renderD%d partition %d not valid!",
                                        file_priv->minor->index, i);
                                return -ENOENT;
                        }
                        dev_dbg(adev->dev, "renderD%d partition %d opened!",
                                file_priv->minor->index, i);
                        fpriv->xcp_id = i;
                        break;
                }
        }

        fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
                                adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
        return 0;
}

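/*
 * Drop the reference a context entity holds on its partition: if the
 * entity's scheduler is still ready, map it back to the owning ring and
 * decrement the ref_cnt of that ring's partition.
 */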
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
                              struct amdgpu_ctx_entity *entity)
{
        struct drm_gpu_scheduler *sched;
        struct amdgpu_ring *ring;

        if (!adev->xcp_mgr)
                return;

        sched = entity->entity.rq->sched;
        if (sched->ready) {
                ring = to_amdgpu_ring(entity->entity.rq->sched);
                atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
        }
}