xref: /linux/drivers/gpu/drm/xe/xe_gt_sriov_vf.c (revision 3bce87eb744f1f88523a118e10e0deebf31806ec)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023-2024 Intel Corporation
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/bsearch.h>
8 
9 #include <drm/drm_managed.h>
10 #include <drm/drm_print.h>
11 
12 #include "abi/guc_actions_sriov_abi.h"
13 #include "abi/guc_communication_mmio_abi.h"
14 #include "abi/guc_klvs_abi.h"
15 #include "abi/guc_relay_actions_abi.h"
16 #include "regs/xe_gt_regs.h"
17 #include "regs/xe_gtt_defs.h"
18 
19 #include "xe_assert.h"
20 #include "xe_device.h"
21 #include "xe_ggtt.h"
22 #include "xe_gt_sriov_printk.h"
23 #include "xe_gt_sriov_vf.h"
24 #include "xe_gt_sriov_vf_types.h"
25 #include "xe_guc.h"
26 #include "xe_guc_hxg_helpers.h"
27 #include "xe_guc_relay.h"
28 #include "xe_mmio.h"
29 #include "xe_sriov.h"
30 #include "xe_uc_fw.h"
31 #include "xe_wopcm.h"
32 
33 #define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
34 
35 static int guc_action_vf_reset(struct xe_guc *guc)
36 {
37 	u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
38 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
39 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
40 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET),
41 	};
42 	int ret;
43 
44 	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
45 
46 	return ret > 0 ? -EPROTO : ret;
47 }
48 
49 static int vf_reset_guc_state(struct xe_gt *gt)
50 {
51 	struct xe_guc *guc = &gt->uc.guc;
52 	int err;
53 
54 	err = guc_action_vf_reset(guc);
55 	if (unlikely(err))
56 		xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
57 	return err;
58 }
59 
/*
 * Send VF2GUC_MATCH_VERSION over MMIO to negotiate the VF/GuC ABI version.
 *
 * The wanted_* parameters may use GUC_VERSION_*_ANY wildcards to query
 * whatever version the GuC supports; the matched version is returned via
 * @branch/@major/@minor/@patch.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int guc_action_match_version(struct xe_guc *guc,
				    u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
				    u32 *branch, u32 *major, u32 *minor, u32 *patch)
{
	u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_MATCH_VERSION),
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	int ret;

	BUILD_BUG_ON(VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN > GUC_MAX_MMIO_MSG_LEN);

	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	/* the MBZ field must be zero in a well-formed response */
	if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
	*major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
	*patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);

	return 0;
}
92 
93 static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
94 {
95 	struct xe_device *xe = gt_to_xe(gt);
96 
97 	switch (xe->info.platform) {
98 	case XE_TIGERLAKE ... XE_PVC:
99 		/* 1.1 this is current baseline for Xe driver */
100 		*branch = 0;
101 		*major = 1;
102 		*minor = 1;
103 		break;
104 	default:
105 		/* 1.2 has support for the GMD_ID KLV */
106 		*branch = 0;
107 		*major = 1;
108 		*minor = 2;
109 		break;
110 	}
111 }
112 
113 static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
114 {
115 	/* for now it's the same as minimum */
116 	return vf_minimum_guc_version(gt, branch, major, minor);
117 }
118 
/*
 * Negotiate the GuC ABI interface version over MMIO and cache the result
 * in gt->sriov.vf.guc_version.
 *
 * If a version was negotiated before (e.g. prior to a reset), only that
 * very same version is accepted again - any change is reported with
 * -EREMCHG. On failure, the GuC is re-queried with wildcards so the
 * version it actually supports can be logged for diagnostics.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int vf_handshake_with_guc(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_guc *guc = &gt->uc.guc;
	u32 wanted_branch, wanted_major, wanted_minor;
	u32 branch, major, minor, patch;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	/* select wanted version - prefer previous (if any) */
	if (guc_version->major || guc_version->minor) {
		wanted_branch = guc_version->branch;
		wanted_major = guc_version->major;
		wanted_minor = guc_version->minor;
	} else {
		vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
		xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY);
	}

	err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor,
				       &branch, &major, &minor, &patch);
	if (unlikely(err))
		goto fail;

	/* we don't support interface version change */
	if ((guc_version->major || guc_version->minor) &&
	    (guc_version->branch != branch || guc_version->major != major ||
	     guc_version->minor != minor)) {
		xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
				branch, major, minor, patch);
		xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
				 guc_version->branch, guc_version->major,
				 guc_version->minor, guc_version->patch);
		err = -EREMCHG;
		goto fail;
	}

	/* illegal - the GuC must not report a higher major than requested */
	if (major > wanted_major) {
		err = -EPROTO;
		goto unsupported;
	}

	/* there's no fallback on major version. */
	if (major != wanted_major) {
		err = -ENOPKG;
		goto unsupported;
	}

	/* check against minimum version supported by us */
	vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
	xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY);
	if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) {
		err = -ENOKEY;
		goto unsupported;
	}

	xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
			branch, major, minor, patch);

	guc_version->branch = branch;
	guc_version->major = major;
	guc_version->minor = minor;
	guc_version->patch = patch;
	return 0;

unsupported:
	xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
			branch, major, minor, patch, ERR_PTR(err));
fail:
	xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
			wanted_major, wanted_minor, ERR_PTR(err));

	/* try again with *any* just to query which version is supported */
	if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY,
				      GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY,
				      &branch, &major, &minor, &patch))
		xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
				   branch, major, minor, patch);
	return err;
}
201 
/**
 * xe_gt_sriov_vf_bootstrap - Query and setup GuC ABI interface version.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
	int err;

	/* start from a clean GuC state before negotiating the ABI version */
	err = vf_reset_guc_state(gt);
	if (unlikely(err))
		return err;

	return vf_handshake_with_guc(gt);
}
225 
/*
 * Query a single KLV (key/length/value) item from the GuC over MMIO.
 *
 * @value_len is the number of 32-bit value dwords expected for @key (at
 * most 3 fit in an MMIO response). The length reported by the GuC must
 * match it exactly: larger yields -EOVERFLOW, smaller -ENODATA.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
				       u32 *value, u32 value_len)
{
	u32 request[VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV),
		FIELD_PREP(VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY, key),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	u32 length;
	int ret;

	BUILD_BUG_ON(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN > GUC_MAX_MMIO_MSG_LEN);
	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	length = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH, response[0]);
	if (unlikely(length > value_len))
		return -EOVERFLOW;
	if (unlikely(length < value_len))
		return -ENODATA;

	/* copy as many value dwords as requested - cases deliberately cascade */
	switch (value_len) {
	default:
		xe_gt_WARN_ON(guc_to_gt(guc), value_len > 3);
		fallthrough;
	case 3:
		value[2] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96, response[3]);
		fallthrough;
	case 2:
		value[1] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64, response[2]);
		fallthrough;
	case 1:
		value[0] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32, response[1]);
		fallthrough;
	case 0:
		break;
	}

	return 0;
}
273 
/* Query a KLV whose value is a single 32-bit dword. */
static int guc_action_query_single_klv32(struct xe_guc *guc, u32 key, u32 *value32)
{
	return guc_action_query_single_klv(guc, key, value32, hxg_sizeof(u32));
}
278 
279 static int guc_action_query_single_klv64(struct xe_guc *guc, u32 key, u64 *value64)
280 {
281 	u32 value[2];
282 	int err;
283 
284 	err = guc_action_query_single_klv(guc, key, value, hxg_sizeof(value));
285 	if (unlikely(err))
286 		return err;
287 
288 	*value64 = make_u64_from_u32(value[1], value[0]);
289 	return 0;
290 }
291 
292 static bool has_gmdid(struct xe_device *xe)
293 {
294 	return GRAPHICS_VERx100(xe) >= 1270;
295 }
296 
/**
 * xe_gt_sriov_vf_gmdid - Query GMDID over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: value of GMDID KLV on success or 0 on failure.
 */
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
{
	const char *type = xe_gt_is_media_type(gt) ? "media" : "graphics";
	struct xe_guc *guc = &gt->uc.guc;
	u32 value;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !GRAPHICS_VERx100(gt_to_xe(gt)) || has_gmdid(gt_to_xe(gt)));
	/* the GMD_ID KLV requires GuC ABI version 1.2 or newer */
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major > 1 || gt->sriov.vf.guc_version.minor >= 2);

	err = guc_action_query_single_klv32(guc, GUC_KLV_GLOBAL_CFG_GMD_ID_KEY, &value);
	if (unlikely(err)) {
		xe_gt_sriov_err(gt, "Failed to obtain %s GMDID (%pe)\n",
				type, ERR_PTR(err));
		return 0;
	}

	xe_gt_sriov_dbg(gt, "%s GMDID = %#x\n", type, value);
	return value;
}
326 
327 static int vf_get_ggtt_info(struct xe_gt *gt)
328 {
329 	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
330 	struct xe_guc *guc = &gt->uc.guc;
331 	u64 start, size;
332 	int err;
333 
334 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
335 
336 	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
337 	if (unlikely(err))
338 		return err;
339 
340 	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, &size);
341 	if (unlikely(err))
342 		return err;
343 
344 	if (config->ggtt_size && config->ggtt_size != size) {
345 		xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
346 				size / SZ_1K, config->ggtt_size / SZ_1K);
347 		return -EREMCHG;
348 	}
349 
350 	xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
351 				start, start + size - 1, size / SZ_1K);
352 
353 	config->ggtt_base = start;
354 	config->ggtt_size = size;
355 
356 	return config->ggtt_size ? 0 : -ENODATA;
357 }
358 
359 static int vf_get_lmem_info(struct xe_gt *gt)
360 {
361 	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
362 	struct xe_guc *guc = &gt->uc.guc;
363 	char size_str[10];
364 	u64 size;
365 	int err;
366 
367 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
368 
369 	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, &size);
370 	if (unlikely(err))
371 		return err;
372 
373 	if (config->lmem_size && config->lmem_size != size) {
374 		xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
375 				size / SZ_1M, config->lmem_size / SZ_1M);
376 		return -EREMCHG;
377 	}
378 
379 	string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
380 	xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);
381 
382 	config->lmem_size = size;
383 
384 	return config->lmem_size ? 0 : -ENODATA;
385 }
386 
387 static int vf_get_submission_cfg(struct xe_gt *gt)
388 {
389 	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
390 	struct xe_guc *guc = &gt->uc.guc;
391 	u32 num_ctxs, num_dbs;
392 	int err;
393 
394 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
395 
396 	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs);
397 	if (unlikely(err))
398 		return err;
399 
400 	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, &num_dbs);
401 	if (unlikely(err))
402 		return err;
403 
404 	if (config->num_ctxs && config->num_ctxs != num_ctxs) {
405 		xe_gt_sriov_err(gt, "Unexpected CTXs reassignment: %u != %u\n",
406 				num_ctxs, config->num_ctxs);
407 		return -EREMCHG;
408 	}
409 	if (config->num_dbs && config->num_dbs != num_dbs) {
410 		xe_gt_sriov_err(gt, "Unexpected DBs reassignment: %u != %u\n",
411 				num_dbs, config->num_dbs);
412 		return -EREMCHG;
413 	}
414 
415 	xe_gt_sriov_dbg_verbose(gt, "CTXs %u DBs %u\n", num_ctxs, num_dbs);
416 
417 	config->num_ctxs = num_ctxs;
418 	config->num_dbs = num_dbs;
419 
420 	return config->num_ctxs ? 0 : -ENODATA;
421 }
422 
423 static void vf_cache_gmdid(struct xe_gt *gt)
424 {
425 	xe_gt_assert(gt, has_gmdid(gt_to_xe(gt)));
426 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
427 
428 	gt->sriov.vf.runtime.gmdid = xe_gt_sriov_vf_gmdid(gt);
429 }
430 
431 /**
432  * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
433  * @gt: the &xe_gt
434  *
435  * This function is for VF use only.
436  *
437  * Return: 0 on success or a negative error code on failure.
438  */
439 int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
440 {
441 	struct xe_device *xe = gt_to_xe(gt);
442 	int err;
443 
444 	err = vf_get_ggtt_info(gt);
445 	if (unlikely(err))
446 		return err;
447 
448 	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
449 		err = vf_get_lmem_info(gt);
450 		if (unlikely(err))
451 			return err;
452 	}
453 
454 	err = vf_get_submission_cfg(gt);
455 	if (unlikely(err))
456 		return err;
457 
458 	if (has_gmdid(xe))
459 		vf_cache_gmdid(gt);
460 
461 	return 0;
462 }
463 
464 /**
465  * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration.
466  * @gt: the &xe_gt
467  *
468  * This function is for VF use only.
469  *
470  * Return: number of GuC context IDs assigned to VF.
471  */
472 u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
473 {
474 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
475 	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
476 	xe_gt_assert(gt, gt->sriov.vf.self_config.num_ctxs);
477 
478 	return gt->sriov.vf.self_config.num_ctxs;
479 }
480 
481 /**
482  * xe_gt_sriov_vf_lmem - VF LMEM configuration.
483  * @gt: the &xe_gt
484  *
485  * This function is for VF use only.
486  *
487  * Return: size of the LMEM assigned to VF.
488  */
489 u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
490 {
491 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
492 	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
493 	xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);
494 
495 	return gt->sriov.vf.self_config.lmem_size;
496 }
497 
/*
 * Reserve ("balloon") the parts of the tile's GGTT that were not assigned
 * to this VF, so all VF allocations land inside the PF-assigned range.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int vf_balloon_ggtt(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	struct xe_device *xe = gt_to_xe(gt);
	u64 start, end;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!config->ggtt_size)
		return -ENODATA;

	/*
	 * VF can only use part of the GGTT as allocated by the PF:
	 *
	 *      WOPCM                                  GUC_GGTT_TOP
	 *      |<------------ Total GGTT size ------------------>|
	 *
	 *           VF GGTT base -->|<- size ->|
	 *
	 *      +--------------------+----------+-----------------+
	 *      |////////////////////|   block  |\\\\\\\\\\\\\\\\\|
	 *      +--------------------+----------+-----------------+
	 *
	 *      |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
	 */

	/* balloon[0]: from the end of WOPCM up to the VF range */
	start = xe_wopcm_size(xe);
	end = config->ggtt_base;
	if (end != start) {
		err = xe_ggtt_balloon(ggtt, start, end, &tile->sriov.vf.ggtt_balloon[0]);
		if (err)
			goto failed;
	}

	/* balloon[1]: from the end of the VF range up to GUC_GGTT_TOP */
	start = config->ggtt_base + config->ggtt_size;
	end = GUC_GGTT_TOP;
	if (end != start) {
		err = xe_ggtt_balloon(ggtt, start, end, &tile->sriov.vf.ggtt_balloon[1]);
		if (err)
			goto deballoon;
	}

	return 0;

deballoon:
	xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[0]);
failed:
	return err;
}
551 
552 static void deballoon_ggtt(struct drm_device *drm, void *arg)
553 {
554 	struct xe_tile *tile = arg;
555 	struct xe_ggtt *ggtt = tile->mem.ggtt;
556 
557 	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
558 	xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[1]);
559 	xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[0]);
560 }
561 
562 /**
563  * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
564  * @gt: the &xe_gt
565  *
566  * This function is for VF use only.
567  *
568  * Return: 0 on success or a negative error code on failure.
569  */
570 int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
571 {
572 	struct xe_tile *tile = gt_to_tile(gt);
573 	struct xe_device *xe = tile_to_xe(tile);
574 	int err;
575 
576 	if (xe_gt_is_media_type(gt))
577 		return 0;
578 
579 	err = vf_balloon_ggtt(gt);
580 	if (err)
581 		return err;
582 
583 	return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile);
584 }
585 
/*
 * Send VF2PF_HANDSHAKE over the GuC relay to negotiate the VF/PF ABI.
 *
 * On input *major/*minor hold the version wanted by the VF; on success
 * they are updated with the version selected by the PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
{
	u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VF2PF_HANDSHAKE),
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, *major) |
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, *minor),
	};
	u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN];
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		return ret;

	/* a valid response is exactly one fixed-size message */
	if (unlikely(ret != VF2PF_HANDSHAKE_RESPONSE_MSG_LEN))
		return -EPROTO;

	if (unlikely(FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*major = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]);

	return 0;
}
617 
618 static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor)
619 {
620 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
621 
622 	gt->sriov.vf.pf_version.major = major;
623 	gt->sriov.vf.pf_version.minor = minor;
624 }
625 
/* Forget the negotiated VF/PF ABI version (0.0 means "not connected"). */
static void vf_disconnect_pf(struct xe_gt *gt)
{
	vf_connect_pf(gt, 0, 0);
}
630 
631 static int vf_handshake_with_pf(struct xe_gt *gt)
632 {
633 	u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR;
634 	u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR;
635 	u32 major = major_wanted, minor = minor_wanted;
636 	int err;
637 
638 	err = relay_action_handshake(gt, &major, &minor);
639 	if (unlikely(err))
640 		goto failed;
641 
642 	if (!major && !minor) {
643 		err = -ENODATA;
644 		goto failed;
645 	}
646 
647 	xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor);
648 	vf_connect_pf(gt, major, minor);
649 	return 0;
650 
651 failed:
652 	xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
653 			major, minor, ERR_PTR(err));
654 	vf_disconnect_pf(gt);
655 	return err;
656 }
657 
/**
 * xe_gt_sriov_vf_connect - Establish connection with the PF driver.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_connect(struct xe_gt *gt)
{
	int err = vf_handshake_with_pf(gt);

	if (unlikely(err)) {
		xe_gt_sriov_err(gt, "Failed to get version info (%pe)\n", ERR_PTR(err));
		return err;
	}

	return 0;
}
680 
681 static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
682 {
683 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
684 
685 	return major == gt->sriov.vf.pf_version.major &&
686 	       minor <= gt->sriov.vf.pf_version.minor;
687 }
688 
/*
 * (Re)size the runtime register cache to hold @num_regs entries.
 *
 * An existing buffer big enough is simply zeroed and reused; otherwise it
 * is freed and a fresh one, rounded up to a multiple of 4 entries, is
 * allocated. Calling with @num_regs == 0 effectively drops the cache.
 *
 * Return: 0 on success or -ENOMEM if the allocation fails.
 */
static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs)
{
	struct vf_runtime_reg *regs = gt->sriov.vf.runtime.regs;
	unsigned int regs_size = round_up(num_regs, 4);
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

	if (regs) {
		if (num_regs <= gt->sriov.vf.runtime.regs_size) {
			/* reuse: clear only the entries that will be used */
			memset(regs, 0, num_regs * sizeof(*regs));
			gt->sriov.vf.runtime.num_regs = num_regs;
			return 0;
		}

		drmm_kfree(&xe->drm, regs);
		gt->sriov.vf.runtime.regs = NULL;
		gt->sriov.vf.runtime.num_regs = 0;
		gt->sriov.vf.runtime.regs_size = 0;
	}

	regs = drmm_kcalloc(&xe->drm, regs_size, sizeof(*regs), GFP_KERNEL);
	if (unlikely(!regs))
		return -ENOMEM;

	gt->sriov.vf.runtime.regs = regs;
	gt->sriov.vf.runtime.num_regs = num_regs;
	gt->sriov.vf.runtime.regs_size = regs_size;
	return 0;
}
719 
/*
 * Fetch the full set of runtime registers from the PF over the GuC relay.
 *
 * VF2PF_QUERY_RUNTIME is paged: each request asks for up to @limit
 * register (offset, value) pairs starting at @start, and the PF reports
 * how many entries remain. The loop repeats until everything is cached
 * in gt->sriov.vf.runtime. On any failure the cache is dropped.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int vf_query_runtime_info(struct xe_gt *gt)
{
	u32 request[VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN];
	u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 32]; /* up to 16 regs */
	u32 limit = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	u32 count, remaining, num, i;
	u32 start = 0;
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, limit);

	/* this is part of the 1.0 PF/VF ABI */
	if (!vf_is_negotiated(gt, 1, 0))
		return -ENOPKG;

	request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
				GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME) |
		     FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, limit);

repeat:
	request[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, start);
	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		goto failed;

	/* response must hold the fixed header plus whole (offset, value) pairs */
	if (unlikely(ret < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN)) {
		ret = -EPROTO;
		goto failed;
	}
	if (unlikely((ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) % 2)) {
		ret = -EPROTO;
		goto failed;
	}

	num = (ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]);
	remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]);

	xe_gt_sriov_dbg_verbose(gt, "count=%u num=%u ret=%d start=%u remaining=%u\n",
				count, num, ret, start, remaining);

	/* the advertised count must match the actual payload size */
	if (unlikely(count != num)) {
		ret = -EPROTO;
		goto failed;
	}

	if (start == 0) {
		/* the first page tells us the total number of registers */
		ret = vf_prepare_runtime_info(gt, num + remaining);
		if (unlikely(ret < 0))
			goto failed;
	} else if (unlikely(start + num > gt->sriov.vf.runtime.num_regs)) {
		ret = -EPROTO;
		goto failed;
	}

	for (i = 0; i < num; ++i) {
		struct vf_runtime_reg *reg = &gt->sriov.vf.runtime.regs[start + i];

		reg->offset = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i];
		reg->value = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i + 1];
	}

	if (remaining) {
		start += num;
		goto repeat;
	}

	return 0;

failed:
	vf_prepare_runtime_info(gt, 0);
	return ret;
}
798 
799 static void vf_show_runtime_info(struct xe_gt *gt)
800 {
801 	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
802 	unsigned int size = gt->sriov.vf.runtime.num_regs;
803 
804 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
805 
806 	for (; size--; vf_regs++)
807 		xe_gt_sriov_dbg(gt, "runtime(%#x) = %#x\n",
808 				vf_regs->offset, vf_regs->value);
809 }
810 
811 /**
812  * xe_gt_sriov_vf_query_runtime - Query SR-IOV runtime data.
813  * @gt: the &xe_gt
814  *
815  * This function is for VF use only.
816  *
817  * Return: 0 on success or a negative error code on failure.
818  */
819 int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt)
820 {
821 	int err;
822 
823 	err = vf_query_runtime_info(gt);
824 	if (unlikely(err))
825 		goto failed;
826 
827 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
828 		vf_show_runtime_info(gt);
829 
830 	return 0;
831 
832 failed:
833 	xe_gt_sriov_err(gt, "Failed to get runtime info (%pe)\n",
834 			ERR_PTR(err));
835 	return err;
836 }
837 
838 static int vf_runtime_reg_cmp(const void *a, const void *b)
839 {
840 	const struct vf_runtime_reg *ra = a;
841 	const struct vf_runtime_reg *rb = b;
842 
843 	return (int)ra->offset - (int)rb->offset;
844 }
845 
846 static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
847 {
848 	struct xe_gt_sriov_vf_runtime *runtime = &gt->sriov.vf.runtime;
849 	struct vf_runtime_reg key = { .offset = addr };
850 
851 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
852 
853 	return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
854 		       vf_runtime_reg_cmp);
855 }
856 
/**
 * xe_gt_sriov_vf_read32 - Get a register value from the runtime data.
 * @gt: the &xe_gt
 * @reg: the register to read
 *
 * This function is for VF use only.
 * This function shall be called after VF has connected to PF.
 * This function is dedicated for registers that VFs can't read directly.
 *
 * Return: register value obtained from the PF or 0 if not found.
 */
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
	struct vf_runtime_reg *rr;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.pf_version.major);
	xe_gt_assert(gt, !reg.vf);

	/* GMD_ID is served from the value cached at config query time */
	if (reg.addr == GMD_ID.addr) {
		xe_gt_sriov_dbg_verbose(gt, "gmdid(%#x) = %#x\n",
					addr, gt->sriov.vf.runtime.gmdid);
		return gt->sriov.vf.runtime.gmdid;
	}

	rr = vf_lookup_reg(gt, addr);
	if (!rr) {
		/* warn only on debug builds; returning 0 keeps callers going */
		xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
			   "VF is trying to read an inaccessible register %#x+%#x\n",
			   reg.addr, addr - reg.addr);
		return 0;
	}

	xe_gt_sriov_dbg_verbose(gt, "runtime[%#x] = %#x\n", addr, rr->value);
	return rr->value;
}
894 
895 /**
896  * xe_gt_sriov_vf_print_config - Print VF self config.
897  * @gt: the &xe_gt
898  * @p: the &drm_printer
899  *
900  * This function is for VF use only.
901  */
902 void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
903 {
904 	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
905 	struct xe_device *xe = gt_to_xe(gt);
906 	char buf[10];
907 
908 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
909 
910 	drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
911 		   config->ggtt_base,
912 		   config->ggtt_base + config->ggtt_size - 1);
913 
914 	string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
915 	drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
916 
917 	if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
918 		string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
919 		drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
920 	}
921 
922 	drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
923 	drm_printf(p, "GuC doorbells:\t%u\n", config->num_dbs);
924 }
925 
926 /**
927  * xe_gt_sriov_vf_print_runtime - Print VF's runtime regs received from PF.
928  * @gt: the &xe_gt
929  * @p: the &drm_printer
930  *
931  * This function is for VF use only.
932  */
933 void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
934 {
935 	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
936 	unsigned int size = gt->sriov.vf.runtime.num_regs;
937 
938 	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
939 
940 	for (; size--; vf_regs++)
941 		drm_printf(p, "%#x = %#x\n", vf_regs->offset, vf_regs->value);
942 }
943 
/**
 * xe_gt_sriov_vf_print_version - Print VF ABI versions.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version;
	u32 branch, major, minor;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GuC ABI:\n");

	/* minimum version this driver can work with */
	vf_minimum_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor);

	/* version we ask for during the handshake */
	vf_wanted_guc_version(gt, &branch, &major, &minor);
	drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor);

	/* version actually negotiated with the GuC */
	drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
		   guc_version->branch, guc_version->major,
		   guc_version->minor, guc_version->patch);

	drm_printf(p, "PF ABI:\n");

	drm_printf(p, "\tbase:\t%u.%u\n",
		   GUC_RELAY_VERSION_BASE_MAJOR, GUC_RELAY_VERSION_BASE_MINOR);
	drm_printf(p, "\twanted:\t%u.%u\n",
		   GUC_RELAY_VERSION_LATEST_MAJOR, GUC_RELAY_VERSION_LATEST_MINOR);
	/* version actually negotiated with the PF */
	drm_printf(p, "\thandshake:\t%u.%u\n",
		   pf_version->major, pf_version->minor);
}
980