// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_klvs_abi.h"
#include "abi/guc_relay_actions_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sriov_vf_types.h"
#include "xe_guc.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"

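/* Build a 64-bit value from two 32-bit dwords (hi = upper 32 bits, lo = lower 32 bits). */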
#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

static int guc_action_vf_reset(struct xe_guc *guc)
{
	u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET),
	};
	int ret;

	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

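	/* a positive return means unexpected response data - treat it as a protocol error */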
	return ret > 0 ? -EPROTO : ret;
}

static int vf_reset_guc_state(struct xe_gt *gt)
{
	struct xe_guc *guc = &gt->uc.guc;
	int err;

	err = guc_action_vf_reset(guc);
	if (unlikely(err))
		xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
	return err;
}

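/*
 * VF2GUC_MATCH_VERSION: ask the GuC over MMIO for the interface version that
 * best matches the wanted branch/major/minor and report back what it selected.
 */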
static int guc_action_match_version(struct xe_guc *guc,
				    u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
				    u32 *branch, u32 *major, u32 *minor, u32 *patch)
{
	u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_MATCH_VERSION),
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	int ret;

	BUILD_BUG_ON(VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN > GUC_MAX_MMIO_MSG_LEN);

	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
	*major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
	*patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);

	return 0;
}

static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
{
	struct xe_device *xe = gt_to_xe(gt);

	switch (xe->info.platform) {
	case XE_TIGERLAKE ... XE_PVC:
		/* 1.1 is the current baseline for the Xe driver */
		*branch = 0;
		*major = 1;
		*minor = 1;
		break;
	default:
		/* 1.2 adds support for the GMD_ID KLV */
		*branch = 0;
		*major = 1;
		*minor = 2;
		break;
	}
}

static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
{
	/* for now it's the same as the minimum */
	return vf_minimum_guc_version(gt, branch, major, minor);
}

static int vf_handshake_with_guc(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_guc *guc = &gt->uc.guc;
	u32 wanted_branch, wanted_major, wanted_minor;
	u32 branch, major, minor, patch;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	/* select wanted version - prefer previous (if any) */
	if (guc_version->major || guc_version->minor) {
		wanted_branch = guc_version->branch;
		wanted_major = guc_version->major;
		wanted_minor = guc_version->minor;
	} else {
		vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
		xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY);
	}

	err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor,
				       &branch, &major, &minor, &patch);
	if (unlikely(err))
		goto fail;

	/* we don't support interface version change */
	if ((guc_version->major || guc_version->minor) &&
	    (guc_version->branch != branch || guc_version->major != major ||
	     guc_version->minor != minor)) {
		xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
				branch, major, minor, patch);
		xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
				 guc_version->branch, guc_version->major,
				 guc_version->minor, guc_version->patch);
		err = -EREMCHG;
		goto fail;
	}

	/* illegal - the GuC must not report a major newer than the one requested */
	if (major > wanted_major) {
		err = -EPROTO;
		goto unsupported;
	}

	/* there's no fallback on major version. */
	if (major != wanted_major) {
		err = -ENOPKG;
		goto unsupported;
	}

	/* check against minimum version supported by us */
	vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
	xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY);
	if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) {
		err = -ENOKEY;
		goto unsupported;
	}

	xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
			branch, major, minor, patch);

	guc_version->branch = branch;
	guc_version->major = major;
	guc_version->minor = minor;
	guc_version->patch = patch;
	return 0;

unsupported:
	xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
			branch, major, minor, patch, ERR_PTR(err));
fail:
	xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
			wanted_major, wanted_minor, ERR_PTR(err));

	/* try again with *any* just to query which version is supported */
	if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY,
				      GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY,
				      &branch, &major, &minor, &patch))
		xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
				   branch, major, minor, patch);
	return err;
}

/**
 * xe_gt_sriov_vf_bootstrap - Query and set up the GuC ABI interface version.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
	int err;

	err = vf_reset_guc_state(gt);
	if (unlikely(err))
		return err;

	err = vf_handshake_with_guc(gt);
	if (unlikely(err))
		return err;

	return 0;
}

static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
				       u32 *value, u32 value_len)
{
	u32 request[VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV),
		FIELD_PREP(VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY, key),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	u32 length;
	int ret;

	BUILD_BUG_ON(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN > GUC_MAX_MMIO_MSG_LEN);
	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	length = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH, response[0]);
	if (unlikely(length > value_len))
		return -EOVERFLOW;
	if (unlikely(length < value_len))
		return -ENODATA;

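	/* value_len is in dwords; the MMIO response carries at most 3 value dwords */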
	switch (value_len) {
	default:
		xe_gt_WARN_ON(guc_to_gt(guc), value_len > 3);
		fallthrough;
	case 3:
		value[2] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96, response[3]);
		fallthrough;
	case 2:
		value[1] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64, response[2]);
		fallthrough;
	case 1:
		value[0] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32, response[1]);
		fallthrough;
	case 0:
		break;
	}

	return 0;
}

static int guc_action_query_single_klv32(struct xe_guc *guc, u32 key, u32 *value32)
{
	return guc_action_query_single_klv(guc, key, value32, hxg_sizeof(u32));
}

static int guc_action_query_single_klv64(struct xe_guc *guc, u32 key, u64 *value64)
{
	u32 value[2];
	int err;

	err = guc_action_query_single_klv(guc, key, value, hxg_sizeof(value));
	if (unlikely(err))
		return err;

	*value64 = make_u64_from_u32(value[1], value[0]);
	return 0;
}

static bool has_gmdid(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) >= 1270;
}

/**
 * xe_gt_sriov_vf_gmdid - Query GMDID over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: value of GMDID KLV on success or 0 on failure.
 */
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
{
	const char *type = xe_gt_is_media_type(gt) ? "media" : "graphics";
	struct xe_guc *guc = &gt->uc.guc;
	u32 value;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !GRAPHICS_VERx100(gt_to_xe(gt)) || has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major > 1 || gt->sriov.vf.guc_version.minor >= 2);

	err = guc_action_query_single_klv32(guc, GUC_KLV_GLOBAL_CFG_GMD_ID_KEY, &value);
	if (unlikely(err)) {
		xe_gt_sriov_err(gt, "Failed to obtain %s GMDID (%pe)\n",
				type, ERR_PTR(err));
		return 0;
	}

	xe_gt_sriov_dbg(gt, "%s GMDID = %#x\n", type, value);
	return value;
}

static int vf_get_ggtt_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u64 start, size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

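	/* changing an already cached GGTT assignment is not supported */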
	if (config->ggtt_size && config->ggtt_size != size) {
		xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
				size / SZ_1K, config->ggtt_size / SZ_1K);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
				start, start + size - 1, size / SZ_1K);

	config->ggtt_base = start;
	config->ggtt_size = size;

	return config->ggtt_size ? 0 : -ENODATA;
}

static int vf_get_lmem_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	char size_str[10];
	u64 size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

	if (config->lmem_size && config->lmem_size != size) {
		xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
				size / SZ_1M, config->lmem_size / SZ_1M);
		return -EREMCHG;
	}

	string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
	xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);

	config->lmem_size = size;

	return config->lmem_size ? 0 : -ENODATA;
}

static int vf_get_submission_cfg(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u32 num_ctxs, num_dbs;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, &num_dbs);
	if (unlikely(err))
		return err;

	if (config->num_ctxs && config->num_ctxs != num_ctxs) {
		xe_gt_sriov_err(gt, "Unexpected CTXs reassignment: %u != %u\n",
				num_ctxs, config->num_ctxs);
		return -EREMCHG;
	}
	if (config->num_dbs && config->num_dbs != num_dbs) {
		xe_gt_sriov_err(gt, "Unexpected DBs reassignment: %u != %u\n",
				num_dbs, config->num_dbs);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "CTXs %u DBs %u\n", num_ctxs, num_dbs);

	config->num_ctxs = num_ctxs;
	config->num_dbs = num_dbs;

	return config->num_ctxs ? 0 : -ENODATA;
}

static void vf_cache_gmdid(struct xe_gt *gt)
{
	xe_gt_assert(gt, has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	gt->sriov.vf.runtime.gmdid = xe_gt_sriov_vf_gmdid(gt);
}

/**
 * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	err = vf_get_ggtt_info(gt);
	if (unlikely(err))
		return err;

	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
		err = vf_get_lmem_info(gt);
		if (unlikely(err))
			return err;
	}

	err = vf_get_submission_cfg(gt);
	if (unlikely(err))
		return err;

	if (has_gmdid(xe))
		vf_cache_gmdid(gt);

	return 0;
}

/**
 * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: number of GuC context IDs assigned to the VF.
 */
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.num_ctxs);

	return gt->sriov.vf.self_config.num_ctxs;
}

/**
 * xe_gt_sriov_vf_lmem - VF LMEM configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: size of the LMEM assigned to the VF.
 */
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);

	return gt->sriov.vf.self_config.lmem_size;
}

static struct xe_ggtt_node *
vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
{
	struct xe_ggtt_node *node;
	int err;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return node;

	err = xe_ggtt_node_insert_balloon(node, start, end);
	if (err) {
		xe_ggtt_node_fini(node);
		return ERR_PTR(err);
	}

	return node;
}

static int vf_balloon_ggtt(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	struct xe_device *xe = gt_to_xe(gt);
	u64 start, end;

	xe_gt_assert(gt, IS_SRIOV_VF(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!config->ggtt_size)
		return -ENODATA;

	/*
	 * VF can only use part of the GGTT as allocated by the PF:
	 *
	 *     WOPCM                                  GUC_GGTT_TOP
	 *     |<------------ Total GGTT size ------------------>|
	 *
	 *     VF GGTT base -->|<- size ->|
	 *
	 *     +--------------------+----------+-----------------+
	 *     |////////////////////|   block  |\\\\\\\\\\\\\\\\\|
	 *     +--------------------+----------+-----------------+
	 *
	 *     |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
	 */

	start = xe_wopcm_size(xe);
	end = config->ggtt_base;
	if (end != start) {
		tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
		if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
			return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
	}

	start = config->ggtt_base + config->ggtt_size;
	end = GUC_GGTT_TOP;
	if (end != start) {
		tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
		if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
			xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
			return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
		}
	}

	return 0;
}

static void deballoon_ggtt(struct drm_device *drm, void *arg)
{
	struct xe_tile *tile = arg;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
	xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
	xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
}

/**
 * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	int err;

	if (xe_gt_is_media_type(gt))
		return 0;

	err = vf_balloon_ggtt(gt);
	if (err)
		return err;

	return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile);
}

static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
{
	u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VF2PF_HANDSHAKE),
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, *major) |
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, *minor),
	};
	u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN];
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(ret != VF2PF_HANDSHAKE_RESPONSE_MSG_LEN))
		return -EPROTO;

	if (unlikely(FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*major = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]);

	return 0;
}

static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	gt->sriov.vf.pf_version.major = major;
	gt->sriov.vf.pf_version.minor = minor;
}

static void vf_disconnect_pf(struct xe_gt *gt)
{
	vf_connect_pf(gt, 0, 0);
}

static int vf_handshake_with_pf(struct xe_gt *gt)
{
	u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR;
	u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR;
	u32 major = major_wanted, minor = minor_wanted;
	int err;

	err = relay_action_handshake(gt, &major, &minor);
	if (unlikely(err))
		goto failed;

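	/* treat a 0.0 reply as the PF having no compatible ABI version to offer */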
	if (!major && !minor) {
		err = -ENODATA;
		goto failed;
	}

	xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor);
	vf_connect_pf(gt, major, minor);
	return 0;

failed:
	xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
			major, minor, ERR_PTR(err));
	vf_disconnect_pf(gt);
	return err;
}

/**
 * xe_gt_sriov_vf_connect - Establish connection with the PF driver.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_connect(struct xe_gt *gt)
{
	int err;

	err = vf_handshake_with_pf(gt);
	if (unlikely(err))
		goto failed;

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get version info (%pe)\n", ERR_PTR(err));
	return err;
}

static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	return major == gt->sriov.vf.pf_version.major &&
	       minor <= gt->sriov.vf.pf_version.minor;
}

static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs)
{
	struct vf_runtime_reg *regs = gt->sriov.vf.runtime.regs;
	unsigned int regs_size = round_up(num_regs, 4);
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

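	/* reuse the already allocated buffer if it can hold the requested count */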
	if (regs) {
		if (num_regs <= gt->sriov.vf.runtime.regs_size) {
			memset(regs, 0, num_regs * sizeof(*regs));
			gt->sriov.vf.runtime.num_regs = num_regs;
			return 0;
		}

		drmm_kfree(&xe->drm, regs);
		gt->sriov.vf.runtime.regs = NULL;
		gt->sriov.vf.runtime.num_regs = 0;
		gt->sriov.vf.runtime.regs_size = 0;
	}

	regs = drmm_kcalloc(&xe->drm, regs_size, sizeof(*regs), GFP_KERNEL);
	if (unlikely(!regs))
		return -ENOMEM;

	gt->sriov.vf.runtime.regs = regs;
	gt->sriov.vf.runtime.num_regs = num_regs;
	gt->sriov.vf.runtime.regs_size = regs_size;
	return 0;
}

static int vf_query_runtime_info(struct xe_gt *gt)
{
	u32 request[VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN];
	u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 32]; /* up to 16 regs */
	u32 limit = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	u32 count, remaining, num, i;
	u32 start = 0;
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, limit);

	/* this is part of the 1.0 PF/VF ABI */
	if (!vf_is_negotiated(gt, 1, 0))
		return -ENOPKG;

	request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
				GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME) |
		     FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, limit);

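	/* query (offset, value) pairs in chunks of at most 'limit' entries until none remain */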
repeat:
	request[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, start);
	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		goto failed;

	if (unlikely(ret < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN)) {
		ret = -EPROTO;
		goto failed;
	}
	if (unlikely((ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) % 2)) {
		ret = -EPROTO;
		goto failed;
	}

	num = (ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]);
	remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]);

	xe_gt_sriov_dbg_verbose(gt, "count=%u num=%u ret=%d start=%u remaining=%u\n",
				count, num, ret, start, remaining);

	if (unlikely(count != num)) {
		ret = -EPROTO;
		goto failed;
	}

	if (start == 0) {
		ret = vf_prepare_runtime_info(gt, num + remaining);
		if (unlikely(ret < 0))
			goto failed;
	} else if (unlikely(start + num > gt->sriov.vf.runtime.num_regs)) {
		ret = -EPROTO;
		goto failed;
	}

	for (i = 0; i < num; ++i) {
		struct vf_runtime_reg *reg = &gt->sriov.vf.runtime.regs[start + i];

		reg->offset = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i];
		reg->value = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i + 1];
	}

	if (remaining) {
		start += num;
		goto repeat;
	}

	return 0;

failed:
	vf_prepare_runtime_info(gt, 0);
	return ret;
}

static void vf_show_runtime_info(struct xe_gt *gt)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		xe_gt_sriov_dbg(gt, "runtime(%#x) = %#x\n",
				vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_query_runtime - Query SR-IOV runtime data.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt)
{
	int err;

	err = vf_query_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		vf_show_runtime_info(gt);

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get runtime info (%pe)\n",
			ERR_PTR(err));
	return err;
}

static int vf_runtime_reg_cmp(const void *a, const void *b)
{
	const struct vf_runtime_reg *ra = a;
	const struct vf_runtime_reg *rb = b;

	return (int)ra->offset - (int)rb->offset;
}

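/* Note: bsearch() below assumes the runtime regs[] array is sorted by ascending offset. */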
static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
{
	struct xe_gt_sriov_vf_runtime *runtime = &gt->sriov.vf.runtime;
	struct vf_runtime_reg key = { .offset = addr };

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
		       vf_runtime_reg_cmp);
}

/**
 * xe_gt_sriov_vf_read32 - Get a register value from the runtime data.
 * @gt: the &xe_gt
 * @reg: the register to read
 *
 * This function is for VF use only.
 * This function shall be called after VF has connected to PF.
 * This function is dedicated for registers that VFs can't read directly.
 *
 * Return: register value obtained from the PF or 0 if not found.
 */
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
	struct vf_runtime_reg *rr;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.pf_version.major);
	xe_gt_assert(gt, !reg.vf);

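	/* GMD_ID is cached separately (see vf_cache_gmdid()), serve it from there */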
	if (reg.addr == GMD_ID.addr) {
		xe_gt_sriov_dbg_verbose(gt, "gmdid(%#x) = %#x\n",
					addr, gt->sriov.vf.runtime.gmdid);
		return gt->sriov.vf.runtime.gmdid;
	}

	rr = vf_lookup_reg(gt, addr);
	if (!rr) {
		xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
			   "VF is trying to read an inaccessible register %#x+%#x\n",
			   reg.addr, addr - reg.addr);
		return 0;
	}

	xe_gt_sriov_dbg_verbose(gt, "runtime[%#x] = %#x\n", addr, rr->value);
	return rr->value;
}

/**
 * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register.
 * @gt: the &xe_gt
 * @reg: the register to write
 * @val: value to write
 *
 * This function is for VF use only.
 * Currently it will trigger a WARN on debug builds.
 */
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !reg.vf);

	/*
	 * In the future, we may want to handle selected writes to inaccessible
	 * registers in some custom way, but for now let's just log a warning
	 * about such an attempt, as likely we might be doing something wrong.
	 */
	xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
		   "VF is trying to write %#x to an inaccessible register %#x+%#x\n",
		   val, reg.addr, addr - reg.addr);
}

/**
 * xe_gt_sriov_vf_print_config - Print VF self config.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_device *xe = gt_to_xe(gt);
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
		   config->ggtt_base,
		   config->ggtt_base + config->ggtt_size - 1);

	string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);

	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
		string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
	}

	drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
	drm_printf(p, "GuC doorbells:\t%u\n", config->num_dbs);
}

/**
 * xe_gt_sriov_vf_print_runtime - Print VF's runtime regs received from PF.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		drm_printf(p, "%#x = %#x\n", vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_print_version - Print VF ABI versions.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version;
	u32 branch, major, minor;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GuC ABI:\n");

	vf_minimum_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor);

	vf_wanted_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor);

	drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
		   guc_version->branch, guc_version->major,
		   guc_version->minor, guc_version->patch);

	drm_printf(p, "PF ABI:\n");

	drm_printf(p, "\tbase:\t%u.%u\n",
		   GUC_RELAY_VERSION_BASE_MAJOR, GUC_RELAY_VERSION_BASE_MINOR);
	drm_printf(p, "\twanted:\t%u.%u\n",
		   GUC_RELAY_VERSION_LATEST_MAJOR, GUC_RELAY_VERSION_LATEST_MINOR);
	drm_printf(p, "\thandshake:\t%u.%u\n",
		   pf_version->major, pf_version->minor);
}