xref: /linux/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c (revision ff48e05d8d1eefbdeb4504c0275c78654b858046)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_relay_actions_abi.h"

#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_pf_service_types.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_mmio.h"

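/*
 * Seed the range of VF/PF ABI versions supported by this PF: 'base' is the
 * oldest version still accepted, 'latest' is the newest version offered.
 */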
static void pf_init_versions(struct xe_gt *gt)
{
	BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
	BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);

	/* base versions may differ between platforms */
	gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
	gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;

	/* latest version is the same for all platforms */
	gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
	gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
}

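/*
 * Negotiation outcomes, illustrated (assuming base == latest == 1.0, which
 * holds as long as only one major version is defined):
 *
 *   VF wants ANY.ANY -> 1.0 (PF picks its latest)
 *   VF wants 2.0     -> 1.0 (capped at PF's latest)
 *   VF wants 1.5     -> 1.0 (same major, common minor)
 *   VF wants 0.9     -> -EPERM (older than the supported base)
 */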
/* Return: 0 on success or a negative error code on failure. */
static int pf_negotiate_version(struct xe_gt *gt,
				u32 wanted_major, u32 wanted_minor,
				u32 *major, u32 *minor)
{
	struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base;
	struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest;

	xe_gt_assert(gt, base.major);
	xe_gt_assert(gt, base.major <= latest.major);
	xe_gt_assert(gt, (base.major < latest.major) || (base.minor <= latest.minor));

	/* VF doesn't care - return our latest */
	if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
	    wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
		*major = latest.major;
		*minor = latest.minor;
		return 0;
	}

	/* VF wants newer than ours - return our latest */
	if (wanted_major > latest.major) {
		*major = latest.major;
		*minor = latest.minor;
		return 0;
	}

	/* VF wants older than min required - reject */
	if (wanted_major < base.major ||
	    (wanted_major == base.major && wanted_minor < base.minor)) {
		return -EPERM;
	}

	/* previous major - return wanted, as we should still support it */
	if (wanted_major < latest.major) {
		/* XXX: we are not prepared for multi-versions yet */
		xe_gt_assert(gt, base.major == latest.major);
		return -ENOPKG;
	}

	/* same major - return common minor */
	*major = wanted_major;
	*minor = min_t(u32, latest.minor, wanted_minor);
	return 0;
}

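/* Track the negotiated ABI per VF; major == minor == 0 means not connected. */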
static void pf_connect(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
{
	xe_gt_sriov_pf_assert_vfid(gt, vfid);
	xe_gt_assert(gt, major || minor);

	gt->sriov.pf.vfs[vfid].version.major = major;
	gt->sriov.pf.vfs[vfid].version.minor = minor;
}

static void pf_disconnect(struct xe_gt *gt, u32 vfid)
{
	xe_gt_sriov_pf_assert_vfid(gt, vfid);

	gt->sriov.pf.vfs[vfid].version.major = 0;
	gt->sriov.pf.vfs[vfid].version.minor = 0;
}

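/*
 * Check that the VF has negotiated exactly this major version of the ABI
 * and at least this minor version.
 */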
static bool pf_is_negotiated(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
{
	xe_gt_sriov_pf_assert_vfid(gt, vfid);

	return major == gt->sriov.pf.vfs[vfid].version.major &&
	       minor <= gt->sriov.pf.vfs[vfid].version.minor;
}

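/*
 * Per-platform lists of "runtime" registers (mostly fuse and capability
 * data) whose values the PF snapshots and serves to VFs over GuC Relay,
 * as VFs may not be able to read them directly.
 */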
static const struct xe_reg tgl_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_SLICE_ENABLE,		/* _MMIO(0x9138) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
	TIMESTAMP_OVERRIDE,		/* _MMIO(0x44074) */
};

static const struct xe_reg ats_m_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
	TIMESTAMP_OVERRIDE,		/* _MMIO(0x44074) */
};

static const struct xe_reg pvc_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
	TIMESTAMP_OVERRIDE,		/* _MMIO(0x44074) */
};

static const struct xe_reg ver_1270_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
	TIMESTAMP_OVERRIDE,		/* _MMIO(0x44074) */
};

static const struct xe_reg ver_2000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
	TIMESTAMP_OVERRIDE,		/* _MMIO(0x44074) */
};

static const struct xe_reg ver_3000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	MIRROR_L3BANK_ENABLE,		/* _MMIO(0x9130) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

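/* Select the runtime register list matching the device's GRAPHICS_VERx100. */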
static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
{
	const struct xe_reg *regs;

	if (GRAPHICS_VERx100(xe) >= 3000) {
		*count = ARRAY_SIZE(ver_3000_runtime_regs);
		regs = ver_3000_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) >= 2000) {
		*count = ARRAY_SIZE(ver_2000_runtime_regs);
		regs = ver_2000_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) >= 1270) {
		*count = ARRAY_SIZE(ver_1270_runtime_regs);
		regs = ver_1270_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) == 1260) {
		*count = ARRAY_SIZE(pvc_runtime_regs);
		regs = pvc_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) == 1255) {
		*count = ARRAY_SIZE(ats_m_runtime_regs);
		regs = ats_m_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) == 1200) {
		*count = ARRAY_SIZE(tgl_runtime_regs);
		regs = tgl_runtime_regs;
	} else {
		regs = ERR_PTR(-ENOPKG);
		*count = 0;
	}

	return regs;
}

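/*
 * Allocate drm-managed storage for the snapshot of the register values;
 * the register lists themselves are static per-platform tables.
 */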
static int pf_alloc_runtime_info(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	xe_gt_assert(gt, IS_SRIOV_PF(xe));
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.size);
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.regs);
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.values);

	regs = pick_runtime_regs(xe, &size);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	if (unlikely(!size))
		return 0;

	values = drmm_kcalloc(&xe->drm, size, sizeof(u32), GFP_KERNEL);
	if (!values)
		return -ENOMEM;

	gt->sriov.pf.service.runtime.size = size;
	gt->sriov.pf.service.runtime.regs = regs;
	gt->sriov.pf.service.runtime.values = values;

	return 0;
}

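/* Read 'count' registers into 'values', one MMIO read per register. */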
static void read_many(struct xe_gt *gt, unsigned int count,
		      const struct xe_reg *regs, u32 *values)
{
	while (count--)
		*values++ = xe_mmio_read32(&gt->mmio, *regs++);
}

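/*
 * (Re)read the current register values so that queries from the VFs are
 * served from an up-to-date snapshot.
 */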
static void pf_prepare_runtime_info(struct xe_gt *gt)
{
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	if (!gt->sriov.pf.service.runtime.size)
		return;

	size = gt->sriov.pf.service.runtime.size;
	regs = gt->sriov.pf.service.runtime.regs;
	values = gt->sriov.pf.service.runtime.values;

	read_many(gt, size, regs, values);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_gt_sriov_pf_service_print_runtime(gt, &p);
	}
}

/**
 * xe_gt_sriov_pf_service_init - Early initialization of the GT SR-IOV PF services.
 * @gt: the &xe_gt to initialize
 *
 * Performs early initialization of the GT SR-IOV PF services, including preparation
 * of the runtime info that will be shared with VFs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
{
	int err;

	pf_init_versions(gt);

	err = pf_alloc_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	return 0;
failed:
	xe_gt_sriov_err(gt, "Failed to initialize service (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_pf_service_update - Update PF SR-IOV services.
 * @gt: the &xe_gt to update
 *
 * Updates runtime data shared with VFs.
 *
 * This function can be called more than once.
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
{
	pf_prepare_runtime_info(gt);
}

/**
 * xe_gt_sriov_pf_service_reset - Reset a connection with the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * Reset the VF/PF ABI version that was negotiated by the VF driver.
 * After that point, the VF driver will have to perform a new version handshake
 * to continue using the PF services.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid)
{
	pf_disconnect(gt, vfid);
}

/* Return: 0 on success or a negative error code on failure. */
static int pf_process_handshake(struct xe_gt *gt, u32 vfid,
				u32 wanted_major, u32 wanted_minor,
				u32 *major, u32 *minor)
{
	int err;

	xe_gt_sriov_dbg_verbose(gt, "VF%u wants ABI version %u.%u\n",
				vfid, wanted_major, wanted_minor);

	err = pf_negotiate_version(gt, wanted_major, wanted_minor, major, minor);

	if (err < 0) {
		xe_gt_sriov_notice(gt, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
				   vfid, wanted_major, wanted_minor, ERR_PTR(err));
		pf_disconnect(gt, vfid);
	} else {
		xe_gt_sriov_dbg(gt, "VF%u negotiated ABI version %u.%u\n",
				vfid, *major, *minor);
		pf_connect(gt, vfid, *major, *minor);
	}

	return err;
}

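/*
 * VF2PF_HANDSHAKE wire format: dw0 is the HXG header (its data field must
 * be zero), dw1 carries the wanted MAJOR/MINOR; on success the response
 * carries the negotiated MAJOR/MINOR back in dw1.
 */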
/* Return: length of the response message or a negative error code on failure. */
static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
				    const u32 *request, u32 len, u32 *response, u32 size)
{
	u32 wanted_major, wanted_minor;
	u32 major, minor;
	u32 mbz;
	int err;

	if (unlikely(len != VF2PF_HANDSHAKE_REQUEST_MSG_LEN))
		return -EMSGSIZE;

	mbz = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ, request[0]);
	if (unlikely(mbz))
		return -EPFNOSUPPORT;

	wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
	wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);

	err = pf_process_handshake(gt, origin, wanted_major, wanted_minor, &major, &minor);
	if (err < 0)
		return err;

	xe_gt_assert(gt, major || minor);
	xe_gt_assert(gt, size >= VF2PF_HANDSHAKE_RESPONSE_MSG_LEN);

	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
		      FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, 0);
	response[1] = FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, major) |
		      FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, minor);

	return VF2PF_HANDSHAKE_RESPONSE_MSG_LEN;
}

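/*
 * One VF2PF_QUERY_RUNTIME response entry on the wire: the (GT-adjusted)
 * register offset followed by its cached value, two dwords total.
 */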
struct reg_data {
	u32 offset;
	u32 value;
} __packed;
static_assert(hxg_sizeof(struct reg_data) == 2);

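/*
 * Serve a slice of the runtime register snapshot: 'start' indexes the first
 * entry to copy, 'limit' caps the number of entries, and 'remaining' reports
 * how many entries the VF still has left to fetch.
 */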
/* Return: number of entries copied or negative error code on failure. */
static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
				    struct reg_data *data, u32 *remaining)
{
	struct xe_gt_sriov_pf_service_runtime_regs *runtime;
	unsigned int count, i;
	u32 addr;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	runtime = &gt->sriov.pf.service.runtime;

	if (start > runtime->size)
		return -ERANGE;

	count = min_t(u32, runtime->size - start, limit);

	for (i = 0; i < count; ++i, ++data) {
		addr = runtime->regs[start + i].addr;
		data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr);
		data->value = runtime->values[start + i];
	}

	*remaining = runtime->size - start - count;
	return count;
}

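/*
 * Chunking math, illustrated (the 32-dword buffer is just an example): the
 * response holds a 2-dword header followed by 2-dword reg_data entries, so
 * a 32-dword buffer gives max_chunks = (32 - 2) / 2 = 15 entries per query;
 * the VF re-issues the query with an updated START until REMAINING is zero.
 */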
/* Return: length of the response message or a negative error code on failure. */
static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
					const u32 *msg, u32 msg_len, u32 *response, u32 resp_size)
{
	const u32 chunk_size = hxg_sizeof(struct reg_data);
	struct reg_data *reg_data_buf;
	u32 limit, start, max_chunks;
	u32 remaining = 0;
	int ret;

	if (!pf_is_negotiated(gt, origin, 1, 0))
		return -EACCES;
	if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EMSGSIZE;
	if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EPROTO;
	if (unlikely(resp_size < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN))
		return -EINVAL;

	limit = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, msg[0]);
	start = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, msg[1]);

	resp_size = min_t(u32, resp_size, VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN);
	max_chunks = (resp_size - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / chunk_size;
	limit = limit == VF2PF_QUERY_RUNTIME_NO_LIMIT ? max_chunks : min_t(u32, max_chunks, limit);
	reg_data_buf = (void *)(response + VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN);

	ret = pf_service_runtime_query(gt, start, limit, reg_data_buf, &remaining);
	if (ret < 0)
		return ret;

	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
		      FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, ret);
	response[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, remaining);

	return VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + ret * hxg_sizeof(struct reg_data);
}

/**
 * xe_gt_sriov_pf_service_process_request - Service GT level SR-IOV request message from the VF.
 * @gt: the &xe_gt that provides the service
 * @origin: VF number that is requesting the service
 * @msg: request message
 * @msg_len: length of the request message (in dwords)
 * @response: placeholder for the response message
 * @resp_size: length of the response message buffer (in dwords)
 *
 * This function processes a `Relay Message`_ request from the VF.
 *
 * Return: length of the response message or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_process_request(struct xe_gt *gt, u32 origin,
					   const u32 *msg, u32 msg_len,
					   u32 *response, u32 resp_size)
{
	u32 action, data __maybe_unused;
	int ret;

	xe_gt_assert(gt, msg_len >= GUC_HXG_MSG_MIN_LEN);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_REQUEST);

	action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
	data = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
	xe_gt_sriov_dbg_verbose(gt, "service action %#x:%u from VF%u\n",
				action, data, origin);

	switch (action) {
	case GUC_RELAY_ACTION_VF2PF_HANDSHAKE:
		ret = pf_process_handshake_msg(gt, origin, msg, msg_len, response, resp_size);
		break;
	case GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME:
		ret = pf_process_runtime_query_msg(gt, origin, msg, msg_len, response, resp_size);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xe_gt_sriov_pf_service_print_runtime - Print PF runtime data shared with VFs.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for PF use only.
 *
 * Return: always 0.
 */
int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p)
{
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = gt->sriov.pf.service.runtime.size;
	regs = gt->sriov.pf.service.runtime.regs;
	values = gt->sriov.pf.service.runtime.values;

	for (; size--; regs++, values++) {
		drm_printf(p, "reg[%#x] = %#x\n",
			   xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values);
	}

	return 0;
}

/**
 * xe_gt_sriov_pf_service_print_version - Print ABI versions negotiated with VFs.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for PF use only.
 *
 * Return: always 0.
 */
int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
	struct xe_gt_sriov_pf_service_version *version;

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	for (n = 1; n <= total_vfs; n++) {
		version = &gt->sriov.pf.vfs[n].version;
		if (!version->major && !version->minor)
			continue;

		drm_printf(p, "VF%u:\t%u.%u\n", n, version->major, version->minor);
	}

	return 0;
}

#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_gt_sriov_pf_service_test.c"
#endif