// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_klvs_abi.h"
#include "abi/guc_relay_actions_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sriov_vf_types.h"
#include "xe_guc.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"

#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

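/*
 * Send the VF2GUC_VF_RESET request over MMIO to ask GuC to reset the state
 * of this VF; any positive return value from xe_guc_mmio_send() is treated
 * as a protocol error (-EPROTO).
 */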
static int guc_action_vf_reset(struct xe_guc *guc)
{
	u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET),
	};
	int ret;

	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int vf_reset_guc_state(struct xe_gt *gt)
{
	struct xe_guc *guc = &gt->uc.guc;
	int err;

	err = guc_action_vf_reset(guc);
	if (unlikely(err))
		xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
	return err;
}

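/*
 * Negotiate the GuC ABI version via the VF2GUC_MATCH_VERSION action: the VF
 * asks for a wanted branch/major/minor and GuC replies with the version it
 * actually supports, returned through the output parameters.
 */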
static int guc_action_match_version(struct xe_guc *guc,
				    u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
				    u32 *branch, u32 *major, u32 *minor, u32 *patch)
{
	u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_MATCH_VERSION),
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	int ret;

	BUILD_BUG_ON(VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN > GUC_MAX_MMIO_MSG_LEN);

	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
	*major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
	*patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);

	return 0;
}

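/*
 * The minimum GuC ABI version depends on the platform: pre-GMD_ID platforms
 * can work with 1.1, newer platforms need at least 1.2 (which added the
 * GMD_ID KLV). The "wanted" version currently equals the minimum one.
 */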
static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
{
	struct xe_device *xe = gt_to_xe(gt);

	switch (xe->info.platform) {
	case XE_TIGERLAKE ... XE_PVC:
		/* 1.1 is the current baseline for the Xe driver */
		*branch = 0;
		*major = 1;
		*minor = 1;
		break;
	default:
		/* 1.2 has support for the GMD_ID KLV */
		*branch = 0;
		*major = 1;
		*minor = 2;
		break;
	}
}

static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
{
	/* for now it's the same as the minimum version */
	return vf_minimum_guc_version(gt, branch, major, minor);
}

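/*
 * Negotiate the GuC ABI version to be used by this VF. If a version was
 * already negotiated earlier, that same version is requested again, since a
 * change of the interface version is not supported.
 */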
static int vf_handshake_with_guc(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_guc *guc = &gt->uc.guc;
	u32 wanted_branch, wanted_major, wanted_minor;
	u32 branch, major, minor, patch;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	/* select wanted version - prefer previous (if any) */
	if (guc_version->major || guc_version->minor) {
		wanted_branch = guc_version->branch;
		wanted_major = guc_version->major;
		wanted_minor = guc_version->minor;
	} else {
		vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
		xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY);
	}

	err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor,
				       &branch, &major, &minor, &patch);
	if (unlikely(err))
		goto fail;

	/* we don't support interface version change */
	if ((guc_version->major || guc_version->minor) &&
	    (guc_version->branch != branch || guc_version->major != major ||
	     guc_version->minor != minor)) {
		xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
				branch, major, minor, patch);
		xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
				 guc_version->branch, guc_version->major,
				 guc_version->minor, guc_version->patch);
		err = -EREMCHG;
		goto fail;
	}

	/* illegal */
	if (major > wanted_major) {
		err = -EPROTO;
		goto unsupported;
	}

	/* there's no fallback on major version. */
	if (major != wanted_major) {
		err = -ENOPKG;
		goto unsupported;
	}

	/* check against minimum version supported by us */
	vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
	xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY);
	if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) {
		err = -ENOKEY;
		goto unsupported;
	}

	xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
			branch, major, minor, patch);

	guc_version->branch = branch;
	guc_version->major = major;
	guc_version->minor = minor;
	guc_version->patch = patch;
	return 0;

unsupported:
	xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
			branch, major, minor, patch, ERR_PTR(err));
fail:
	xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
			wanted_major, wanted_minor, ERR_PTR(err));

	/* try again with *any* just to query which version is supported */
	if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY,
				      GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY,
				      &branch, &major, &minor, &patch))
		xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
				   branch, major, minor, patch);
	return err;
}

/**
 * xe_gt_sriov_vf_bootstrap - Query and setup GuC ABI interface version.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
	int err;

	err = vf_reset_guc_state(gt);
	if (unlikely(err))
		return err;

	err = vf_handshake_with_guc(gt);
	if (unlikely(err))
		return err;

	return 0;
}

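/*
 * Send the VF2GUC_NOTIFY_RESFIX_DONE request over MMIO to tell GuC that the
 * resource fixups (e.g. after VF migration) have been applied; as with the
 * reset action, a positive return value is treated as a protocol error.
 */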
static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
{
	u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE),
	};
	int ret;

	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

/**
 * xe_gt_sriov_vf_notify_resfix_done - Notify GuC that resource fixups have been applied.
 * @gt: the &xe_gt struct instance linked to target GuC
 *
 * Returns: 0 if the operation completed successfully, or a negative error
 * code otherwise.
 */
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
{
	struct xe_guc *guc = &gt->uc.guc;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_vf_notify_resfix_done(guc);
	if (unlikely(err))
		xe_gt_sriov_err(gt, "Failed to notify GuC about resource fixup done (%pe)\n",
				ERR_PTR(err));
	else
		xe_gt_sriov_dbg_verbose(gt, "sent GuC resource fixup done\n");

	return err;
}

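/*
 * Query a single KLV (key/length/value) item from GuC over MMIO. The caller
 * provides the expected value length in 32-bit dwords (0..3); a longer reply
 * than expected is reported as -EOVERFLOW, a shorter one as -ENODATA.
 */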
static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
				       u32 *value, u32 value_len)
{
	u32 request[VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV),
		FIELD_PREP(VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY, key),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	u32 length;
	int ret;

	BUILD_BUG_ON(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN > GUC_MAX_MMIO_MSG_LEN);
	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	length = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH, response[0]);
	if (unlikely(length > value_len))
		return -EOVERFLOW;
	if (unlikely(length < value_len))
		return -ENODATA;

	switch (value_len) {
	default:
		xe_gt_WARN_ON(guc_to_gt(guc), value_len > 3);
		fallthrough;
	case 3:
		value[2] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96, response[3]);
		fallthrough;
	case 2:
		value[1] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64, response[2]);
		fallthrough;
	case 1:
		value[0] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32, response[1]);
		fallthrough;
	case 0:
		break;
	}

	return 0;
}

static int guc_action_query_single_klv32(struct xe_guc *guc, u32 key, u32 *value32)
{
	return guc_action_query_single_klv(guc, key, value32, hxg_sizeof(u32));
}

static int guc_action_query_single_klv64(struct xe_guc *guc, u32 key, u64 *value64)
{
	u32 value[2];
	int err;

	err = guc_action_query_single_klv(guc, key, value, hxg_sizeof(value));
	if (unlikely(err))
		return err;

	*value64 = make_u64_from_u32(value[1], value[0]);
	return 0;
}

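/*
 * GMD_ID registers are only present on platforms with graphics IP version
 * 12.70 or newer; on those platforms the VF obtains GMD_ID through a GuC KLV
 * instead of reading the register directly.
 */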
static bool has_gmdid(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) >= 1270;
}

/**
 * xe_gt_sriov_vf_gmdid - Query GMDID over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: value of GMDID KLV on success or 0 on failure.
 */
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
{
	const char *type = xe_gt_is_media_type(gt) ? "media" : "graphics";
	struct xe_guc *guc = &gt->uc.guc;
	u32 value;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !GRAPHICS_VERx100(gt_to_xe(gt)) || has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major > 1 || gt->sriov.vf.guc_version.minor >= 2);

	err = guc_action_query_single_klv32(guc, GUC_KLV_GLOBAL_CFG_GMD_ID_KEY, &value);
	if (unlikely(err)) {
		xe_gt_sriov_err(gt, "Failed to obtain %s GMDID (%pe)\n",
				type, ERR_PTR(err));
		return 0;
	}

	xe_gt_sriov_dbg(gt, "%s GMDID = %#x\n", type, value);
	return value;
}

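/*
 * Read the GGTT region assigned to this VF (start and size KLVs) from GuC.
 * Once a size has been seen, any later reassignment to a different size is
 * rejected with -EREMCHG.
 */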
static int vf_get_ggtt_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u64 start, size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

	if (config->ggtt_size && config->ggtt_size != size) {
		xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
				size / SZ_1K, config->ggtt_size / SZ_1K);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
				start, start + size - 1, size / SZ_1K);

	config->ggtt_base = start;
	config->ggtt_size = size;

	return config->ggtt_size ? 0 : -ENODATA;
}

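/*
 * Read the LMEM size assigned to this VF from GuC; like the GGTT case, a
 * change of an already known size is treated as an unexpected reassignment.
 */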
static int vf_get_lmem_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	char size_str[10];
	u64 size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

	if (config->lmem_size && config->lmem_size != size) {
		xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
				size / SZ_1M, config->lmem_size / SZ_1M);
		return -EREMCHG;
	}

	string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
	xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);

	config->lmem_size = size;

	return config->lmem_size ? 0 : -ENODATA;
}

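/*
 * Read the GuC submission resources assigned to this VF: the number of GuC
 * context IDs and the number of doorbells.
 */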
static int vf_get_submission_cfg(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u32 num_ctxs, num_dbs;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, &num_dbs);
	if (unlikely(err))
		return err;

	if (config->num_ctxs && config->num_ctxs != num_ctxs) {
		xe_gt_sriov_err(gt, "Unexpected CTXs reassignment: %u != %u\n",
				num_ctxs, config->num_ctxs);
		return -EREMCHG;
	}
	if (config->num_dbs && config->num_dbs != num_dbs) {
		xe_gt_sriov_err(gt, "Unexpected DBs reassignment: %u != %u\n",
				num_dbs, config->num_dbs);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "CTXs %u DBs %u\n", num_ctxs, num_dbs);

	config->num_ctxs = num_ctxs;
	config->num_dbs = num_dbs;

	return config->num_ctxs ? 0 : -ENODATA;
}

static void vf_cache_gmdid(struct xe_gt *gt)
{
	xe_gt_assert(gt, has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	gt->sriov.vf.runtime.gmdid = xe_gt_sriov_vf_gmdid(gt);
}

/**
 * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	err = vf_get_ggtt_info(gt);
	if (unlikely(err))
		return err;

	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
		err = vf_get_lmem_info(gt);
		if (unlikely(err))
			return err;
	}

	err = vf_get_submission_cfg(gt);
	if (unlikely(err))
		return err;

	if (has_gmdid(xe))
		vf_cache_gmdid(gt);

	return 0;
}

/**
 * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: number of GuC context IDs assigned to VF.
 */
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.num_ctxs);

	return gt->sriov.vf.self_config.num_ctxs;
}

/**
 * xe_gt_sriov_vf_lmem - VF LMEM configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: size of the LMEM assigned to VF.
 */
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);

	return gt->sriov.vf.self_config.lmem_size;
}

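/*
 * Reserve (balloon) a GGTT range that is not assigned to this VF, so that
 * the GGTT allocator never hands out addresses outside the VF's own range.
 */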
static struct xe_ggtt_node *
vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
{
	struct xe_ggtt_node *node;
	int err;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return node;

	err = xe_ggtt_node_insert_balloon(node, start, end);
	if (err) {
		xe_ggtt_node_fini(node);
		return ERR_PTR(err);
	}

	return node;
}

static int vf_balloon_ggtt(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	struct xe_device *xe = gt_to_xe(gt);
	u64 start, end;

	xe_gt_assert(gt, IS_SRIOV_VF(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!config->ggtt_size)
		return -ENODATA;

	/*
	 * VF can only use part of the GGTT as allocated by the PF:
	 *
	 *      WOPCM                                  GUC_GGTT_TOP
	 *      |<------------ Total GGTT size ------------------>|
	 *
	 *           VF GGTT base -->|<- size ->|
	 *
	 *      +--------------------+----------+-----------------+
	 *      |////////////////////|   block  |\\\\\\\\\\\\\\\\\|
	 *      +--------------------+----------+-----------------+
	 *
	 *      |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
	 */

	start = xe_wopcm_size(xe);
	end = config->ggtt_base;
	if (end != start) {
		tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
		if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
			return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
	}

	start = config->ggtt_base + config->ggtt_size;
	end = GUC_GGTT_TOP;
	if (end != start) {
		tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
		if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
			xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
			return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
		}
	}

	return 0;
}

static void deballoon_ggtt(struct drm_device *drm, void *arg)
{
	struct xe_tile *tile = arg;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
	xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
	xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
}

/**
 * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	int err;

	if (xe_gt_is_media_type(gt))
		return 0;

	err = vf_balloon_ggtt(gt);
	if (err)
		return err;

	return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile);
}

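/*
 * Perform the VF2PF_HANDSHAKE over the GuC relay: the VF proposes a VF/PF
 * ABI version and the PF replies with the version that both sides will use.
 */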
static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
{
	u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VF2PF_HANDSHAKE),
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, *major) |
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, *minor),
	};
	u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN];
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(ret != VF2PF_HANDSHAKE_RESPONSE_MSG_LEN))
		return -EPROTO;

	if (unlikely(FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*major = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]);

	return 0;
}

static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	gt->sriov.vf.pf_version.major = major;
	gt->sriov.vf.pf_version.minor = minor;
}

static void vf_disconnect_pf(struct xe_gt *gt)
{
	vf_connect_pf(gt, 0, 0);
}

static int vf_handshake_with_pf(struct xe_gt *gt)
{
	u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR;
	u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR;
	u32 major = major_wanted, minor = minor_wanted;
	int err;

	err = relay_action_handshake(gt, &major, &minor);
	if (unlikely(err))
		goto failed;

	if (!major && !minor) {
		err = -ENODATA;
		goto failed;
	}

	xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor);
	vf_connect_pf(gt, major, minor);
	return 0;

failed:
	xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
			major, minor, ERR_PTR(err));
	vf_disconnect_pf(gt);
	return err;
}

/**
 * xe_gt_sriov_vf_connect - Establish connection with the PF driver.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_connect(struct xe_gt *gt)
{
	int err;

	err = vf_handshake_with_pf(gt);
	if (unlikely(err))
		goto failed;

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get version info (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
 * or just mark that a GuC is ready for it.
 * @gt: the &xe_gt struct instance linked to target GuC
 *
 * This function shall be called only by VF.
 */
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

	set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
	/*
	 * We need to be certain that if all flags were set, at least one
	 * thread will notice that and schedule the recovery.
	 */
	smp_mb__after_atomic();

	xe_gt_sriov_info(gt, "ready for recovery after migration\n");
	xe_sriov_vf_start_migration_recovery(xe);
}

static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	return major == gt->sriov.vf.pf_version.major &&
	       minor <= gt->sriov.vf.pf_version.minor;
}

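/*
 * (Re)allocate the array that caches runtime register values received from
 * the PF. An existing allocation is reused when it is large enough,
 * otherwise it is freed and a new one (rounded up to a multiple of 4
 * entries) is allocated.
 */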
static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs)
{
	struct vf_runtime_reg *regs = gt->sriov.vf.runtime.regs;
	unsigned int regs_size = round_up(num_regs, 4);
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

	if (regs) {
		if (num_regs <= gt->sriov.vf.runtime.regs_size) {
			memset(regs, 0, num_regs * sizeof(*regs));
			gt->sriov.vf.runtime.num_regs = num_regs;
			return 0;
		}

		drmm_kfree(&xe->drm, regs);
		gt->sriov.vf.runtime.regs = NULL;
		gt->sriov.vf.runtime.num_regs = 0;
		gt->sriov.vf.runtime.regs_size = 0;
	}

	regs = drmm_kcalloc(&xe->drm, regs_size, sizeof(*regs), GFP_KERNEL);
	if (unlikely(!regs))
		return -ENOMEM;

	gt->sriov.vf.runtime.regs = regs;
	gt->sriov.vf.runtime.num_regs = num_regs;
	gt->sriov.vf.runtime.regs_size = regs_size;
	return 0;
}

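/*
 * Fetch the runtime register values from the PF using the (possibly
 * multi-part) VF2PF_QUERY_RUNTIME relay action. Each reply carries up to
 * 'limit' (offset, value) pairs plus the number of entries still remaining,
 * so the query loops until nothing is left.
 */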
static int vf_query_runtime_info(struct xe_gt *gt)
{
	u32 request[VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN];
	u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 32]; /* up to 16 regs */
	u32 limit = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	u32 count, remaining, num, i;
	u32 start = 0;
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, limit);

	/* this is part of the 1.0 PF/VF ABI */
	if (!vf_is_negotiated(gt, 1, 0))
		return -ENOPKG;

	request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
				GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME) |
		     FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, limit);

repeat:
	request[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, start);
	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		goto failed;

	if (unlikely(ret < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN)) {
		ret = -EPROTO;
		goto failed;
	}
	if (unlikely((ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) % 2)) {
		ret = -EPROTO;
		goto failed;
	}

	num = (ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]);
	remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]);

	xe_gt_sriov_dbg_verbose(gt, "count=%u num=%u ret=%d start=%u remaining=%u\n",
				count, num, ret, start, remaining);

	if (unlikely(count != num)) {
		ret = -EPROTO;
		goto failed;
	}

	if (start == 0) {
		ret = vf_prepare_runtime_info(gt, num + remaining);
		if (unlikely(ret < 0))
			goto failed;
	} else if (unlikely(start + num > gt->sriov.vf.runtime.num_regs)) {
		ret = -EPROTO;
		goto failed;
	}

	for (i = 0; i < num; ++i) {
		struct vf_runtime_reg *reg = &gt->sriov.vf.runtime.regs[start + i];

		reg->offset = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i];
		reg->value = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i + 1];
	}

	if (remaining) {
		start += num;
		goto repeat;
	}

	return 0;

failed:
	vf_prepare_runtime_info(gt, 0);
	return ret;
}

static void vf_show_runtime_info(struct xe_gt *gt)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		xe_gt_sriov_dbg(gt, "runtime(%#x) = %#x\n",
				vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_query_runtime - Query SR-IOV runtime data.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt)
{
	int err;

	err = vf_query_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		vf_show_runtime_info(gt);

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get runtime info (%pe)\n",
			ERR_PTR(err));
	return err;
}

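/*
 * The runtime register cache is looked up with bsearch(), which assumes the
 * entries received from the PF are sorted by increasing register offset.
 */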
static int vf_runtime_reg_cmp(const void *a, const void *b)
{
	const struct vf_runtime_reg *ra = a;
	const struct vf_runtime_reg *rb = b;

	return (int)ra->offset - (int)rb->offset;
}

static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
{
	struct xe_gt_sriov_vf_runtime *runtime = &gt->sriov.vf.runtime;
	struct vf_runtime_reg key = { .offset = addr };

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
		       vf_runtime_reg_cmp);
}

/**
 * xe_gt_sriov_vf_read32 - Get a register value from the runtime data.
 * @gt: the &xe_gt
 * @reg: the register to read
 *
 * This function is for VF use only.
 * This function shall be called after VF has connected to PF.
 * This function is dedicated for registers that VFs can't read directly.
 *
 * Return: register value obtained from the PF or 0 if not found.
 */
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);
	struct vf_runtime_reg *rr;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.pf_version.major);
	xe_gt_assert(gt, !reg.vf);

	if (reg.addr == GMD_ID.addr) {
		xe_gt_sriov_dbg_verbose(gt, "gmdid(%#x) = %#x\n",
					addr, gt->sriov.vf.runtime.gmdid);
		return gt->sriov.vf.runtime.gmdid;
	}

	rr = vf_lookup_reg(gt, addr);
	if (!rr) {
		xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
			   "VF is trying to read an inaccessible register %#x+%#x\n",
			   reg.addr, addr - reg.addr);
		return 0;
	}

	xe_gt_sriov_dbg_verbose(gt, "runtime[%#x] = %#x\n", addr, rr->value);
	return rr->value;
}

/**
 * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register.
 * @gt: the &xe_gt
 * @reg: the register to write
 * @val: value to write
 *
 * This function is for VF use only.
 * Currently it will trigger a WARN if running on debug build.
 */
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !reg.vf);

	/*
	 * In the future, we may want to handle selected writes to inaccessible
	 * registers in some custom way, but for now let's just log a warning
	 * about such attempt, as likely we might be doing something wrong.
	 */
	xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
		   "VF is trying to write %#x to an inaccessible register %#x+%#x\n",
		   val, reg.addr, addr - reg.addr);
}

/**
 * xe_gt_sriov_vf_print_config - Print VF self config.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_device *xe = gt_to_xe(gt);
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
		   config->ggtt_base,
		   config->ggtt_base + config->ggtt_size - 1);

	string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);

	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
		string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
	}

	drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
	drm_printf(p, "GuC doorbells:\t%u\n", config->num_dbs);
}

/**
 * xe_gt_sriov_vf_print_runtime - Print VF's runtime regs received from PF.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		drm_printf(p, "%#x = %#x\n", vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_print_version - Print VF ABI versions.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version;
	u32 branch, major, minor;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GuC ABI:\n");

	vf_minimum_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor);

	vf_wanted_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor);

	drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
		   guc_version->branch, guc_version->major,
		   guc_version->minor, guc_version->patch);

	drm_printf(p, "PF ABI:\n");

	drm_printf(p, "\tbase:\t%u.%u\n",
		   GUC_RELAY_VERSION_BASE_MAJOR, GUC_RELAY_VERSION_BASE_MINOR);
	drm_printf(p, "\twanted:\t%u.%u\n",
		   GUC_RELAY_VERSION_LATEST_MAJOR, GUC_RELAY_VERSION_LATEST_MINOR);
	drm_printf(p, "\thandshake:\t%u.%u\n",
		   pf_version->major, pf_version->minor);
}